From 73355bc82e38212e9480e67665670d26bc2c791a Mon Sep 17 00:00:00 2001
From: Eli <43382407+eli64s@users.noreply.github.com>
Date: Sun, 22 Oct 2023 21:02:32 -0500
Subject: [PATCH] Add additional repo metadata to llm prompts.
---
.gitignore | 3 +-
docs/features.md | 2 +-
docs/overview.md | 3 +
examples/markdown/readme-python.md | 439 ++++++++++++++++------------
pyproject.toml | 2 +-
readmeai/config/settings.py | 2 +-
readmeai/core/model.py | 17 +-
readmeai/core/tokens.py | 2 +-
readmeai/main.py | 19 +-
readmeai/settings/config.toml | 35 ++-
readmeai/settings/ignore_files.toml | 1 +
readmeai/utils/utils.py | 20 +-
12 files changed, 320 insertions(+), 225 deletions(-)
diff --git a/.gitignore b/.gitignore
index 0a663336..a5578868 100644
--- a/.gitignore
+++ b/.gitignore
@@ -45,9 +45,10 @@ notebooks/
.benchmarks/
# Other
+templates/
docs/docs
docs/notes
+docs/flow.md
examples/markdown/readme-edgecase.md
readmeai/settings/prompts.toml
readmeai/markdown/data/badges.json
-templates/
diff --git a/docs/features.md b/docs/features.md
index 880aad1b..e2b07536 100644
--- a/docs/features.md
+++ b/docs/features.md
@@ -1,4 +1,4 @@
-## Key Features
+## Features
diff --git a/docs/overview.md b/docs/overview.md
index f66af5b9..aa942e61 100644
--- a/docs/overview.md
+++ b/docs/overview.md
@@ -1,4 +1,7 @@
# README-AI
+---
## Why README-AI?
+
+---
diff --git a/examples/markdown/readme-python.md b/examples/markdown/readme-python.md
index 52b95b9e..a0b73d47 100644
--- a/examples/markdown/readme-python.md
+++ b/examples/markdown/readme-python.md
@@ -1,47 +1,43 @@
-
---
-## ๐ Table of Contents
-- [๐ Table of Contents](#-table-of-contents)
+## ๐ Table of Contents
+- [๐ Table of Contents](#-table-of-contents)
- [๐ Overview](#-overview)
-- [โ๏ธ Features](#๏ธ-features)
-- [๐ Project Structure](#-project-structure)
-- [๐งฉ Modules](#-modules)
+- [๐ฆ Features](#-features)
+- [๐ Repository Structure](#-repository-structure)
+- [โ๏ธ Modules](#modules)
- [๐ Getting Started](#-getting-started)
- - [โ๏ธ Prerequisites](#๏ธ-prerequisites)
- - [๐ป Installation](#-installation)
- - [๐ฎ Using readme-ai](#-using-readme-ai)
- - [๐งช Running Tests](#-running-tests)
-- [๐บ Roadmap](#-roadmap)
+ - [๐ง Installation](#-installation)
+ - [๐ค Running readme-ai](#-running-readme-ai)
+ - [๐งช Tests](#-tests)
+- [๐ฃ Roadmap](#-roadmap)
- [๐ค Contributing](#-contributing)
- [๐ License](#-license)
- [๐ Acknowledgments](#-acknowledgments)
@@ -51,163 +47,227 @@ readme-ai
## ๐ Overview
-The project README-AI is a tool that automatically generates high-level summaries for codebases using OpenAI's text generation model. It analyzes code repositories, extracts information about dependencies and file structure, and generates a Markdown file with a comprehensive summary of the codebase. The tool saves developers time by providing them with a ready-to-use README file that includes key information about the project. Its value proposition lies in its ability to quickly produce informative and standardized project documentation, making it easier for developers to understand, use, and collaborate on codebases.
+The readme-ai repository is a powerful tool that automates the generation of high-quality README files for projects. It uses advanced natural language processing techniques to analyze the codebase and extract relevant information. With its intuitive command-line interface, users can quickly create comprehensive README files that accurately describe their project's purpose, features, and installation instructions. By saving time and ensuring consistency, readme-ai enhances the documentation process and improves project visibility.
---
-## โ๏ธ Features
-
-| Feature | Description |
-| ---------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------- |
-| **โ๏ธ Architecture** | The codebase follows a modular architecture, with different files and modules responsible for specific tasks. The use of factories, wrappers, and handlers promotes code organization and separation of concerns. |
-| **๐ Documentation** | The codebase is well-documented, with detailed explanations of each file's purpose and functionality. The provided summaries are informative and help understand the codebase. |
-| **๐ Dependencies** | The code relies on external libraries such as pandas and requests for data manipulation and HTTP requests. It uses Git, Python, conda, and additional tools like Tree and SnakeViz for development processes. |
-| **๐งฉ Modularity** | The codebase demonstrates modularity by separating functionalities into different modules and classes. Each module focuses on one aspect of the project, which makes the codebase more maintainable and reusable. |
-| **โ๏ธ Testing** | The codebase has a testing strategy, as evidenced by the presence of a test script. It uses the pytest framework for generating a test coverage report and the SnakeViz tool for analyzing the profiled output. |
-| **โก๏ธ Performance** | The overall performance of the codebase appears to be efficient. The code utilizes Pandas for data manipulation, performance profiling, and optimization techniques to ensure good performance. |
-| **๐ Security** | The codebase does not directly handle security-related functionality. However, it generates an OpenAI API key, and handling it securely is important to protect access to the AI model. |
-| **๐ Version Control** | The code repository uses Git for version control. It provides a version history, facilitates collaboration, and allows for easy branching and merging of code changes. |
-| **๐ Integrations** | The codebase integrates with external services and tools such as the OpenAI API, Git repositories, and Docker containers. These integrations enhance the functionality and flexibility of the application. |
-| **๐ถ Scalability** | The codebase does not explicitly address scalability. However, by using modular design and external services, the application can potentially handle growth and can be extended to support additional features in the future. |
+## ๐ฆ Features
+
+| | Feature | Description |
+|----|--------------------|--------------------------------------------------------------------------------------------------------------------|
+| โ๏ธ | **Architecture** | The codebase follows a modular architecture, with components organized into separate directories based on their functionality. The core functionality is implemented in the `core` directory, while the CLI commands are implemented in the `cli` directory. |
+| ๐ | **Documentation** | The documentation appears to be lacking or incomplete. There is no dedicated documentation directory, and it is not clear if there are any external documentation resources available. |
+| ๐ | **Dependencies** | The codebase relies on external dependencies managed through Poetry. The specific dependencies are listed in the `poetry.lock` file. |
+| ๐งฉ | **Modularity** | The codebase is organized into smaller components, allowing for easier development and maintenance. Each component is responsible for a specific functionality, such as parsing, preprocessing, and generating Markdown. |
+| ๐งช | **Testing** | The codebase does not include any testing strategies or tools. There are no test directories or test scripts available. Implementing testing would improve code reliability and maintainability. |
+| โก๏ธ | **Performance** | The codebase does not have any specific performance optimizations. However, the usage of Python and its libraries can generally offer good performance for a command-line tool like this. |
+| ๐ | **Security** | There is no evidence of specific security measures implemented in the codebase. However, since this tool primarily operates on local files, the security risks should be low. |
+| ๐ | **Version Control**| The codebase includes version control strategies through its use of Git. The .github directory contains workflows related to build, publish, and release processes. |
+| ๐ | **Integrations** | The codebase does not have any explicit integrations with external systems or services. It primarily operates on local file systems. |
+| ๐ถ | **Scalability** | The codebase does not explicitly address scalability considerations. However, since it is a command-line tool, scalability might not be a significant concern.
---
-## ๐ Project Structure
-
-
-```bash
-repo
-โโโ CODE_OF_CONDUCT.md
-โโโ CONTRIBUTING.md
-โโโ Dockerfile
-โโโ LICENSE
-โโโ Makefile
-โโโ README.md
-โโโ conf
-โย ย โโโ conf.toml
-โย ย โโโ dependency_files.toml
-โย ย โโโ ignore_files.toml
-โย ย โโโ language_names.toml
-โย ย โโโ language_setup.toml
-โย ย โโโ svg
-โย ย โโโ badges.json
-โย ย โโโ badges_compressed.json
-โโโ examples
-โย ย โโโ imgs
-โย ย โย ย โโโ closing.png
-โย ย โย ย โโโ demo.png
-โย ย โย ย โโโ features.png
-โย ย โย ย โโโ getting_started.png
-โย ย โย ย โโโ header.png
-โย ย โย ย โโโ modules.png
-โย ย โย ย โโโ overview.png
-โย ย โย ย โโโ tree.png
-โย ย โโโ readme-c.md
-โย ย โโโ readme-energy-forecasting.md
-โย ย โโโ readme-fastapi-redis.md
-โย ย โโโ readme-fastapi.md
-โย ย โโโ readme-gitlab.md
-โย ย โโโ readme-go-bash.md
-โย ย โโโ readme-go.md
-โย ย โโโ readme-java.md
-โย ย โโโ readme-javascript-gpt.md
-โย ย โโโ readme-javascript.md
-โย ย โโโ readme-kotlin.md
-โย ย โโโ readme-lanarky.md
-โย ย โโโ readme-mlops.md
-โย ย โโโ readme-pyflink.md
-โย ย โโโ readme-python-ml.md
-โย ย โโโ readme-python.md
-โย ย โโโ readme-react.md
-โย ย โโโ readme-rust-c.md
-โย ย โโโ readme-rust.md
-โย ย โโโ readme-typescript.md
-โโโ poetry.lock
-โโโ pyproject.toml
-โโโ requirements.txt
-โโโ scripts
-โย ย โโโ clean.sh
-โย ย โโโ run.sh
-โย ย โโโ run_batch.sh
-โย ย โโโ test.sh
-โโโ setup
-โย ย โโโ environment.yaml
-โย ย โโโ setup.sh
-โโโ setup.py
-โโโ src
-โย ย โโโ __init__.py
-โย ย โโโ builder.py
-โย ย โโโ conf.py
-โย ย โโโ factory.py
-โย ย โโโ logger.py
-โย ย โโโ main.py
-โย ย โโโ model.py
-โย ย โโโ parse.py
-โย ย โโโ preprocess.py
-โย ย โโโ utils.py
-โโโ tests
- โโโ __init__.py
- โโโ conftest.py
- โโโ test_builder.py
- โโโ test_conf.py
- โโโ test_factory.py
- โโโ test_logger.py
- โโโ test_main.py
- โโโ test_model.py
- โโโ test_parse.py
- โโโ test_preprocess.py
- โโโ test_utils.py
-
-9 directories, 72 files
+## ๐ Repository Structure
+
+```sh
+โโโ readmeai/
+ โโโ .github/
+ โ โโโ release-drafter.yml
+ โ โโโ workflows/
+ โ โโโ build_image.yml
+ โ โโโ publish_package.yml
+ โ โโโ release-drafter.yml
+ โโโ Dockerfile
+ โโโ Makefile
+ โโโ examples/
+ โ โโโ images/
+ โ โโโ markdown/
+ โโโ poetry.lock
+ โโโ pyproject.toml
+ โโโ readmeai/
+ โ โโโ cli/
+ โ โ โโโ commands.py
+ โ โ โโโ options.py
+ โ โโโ config/
+ โ โ โโโ __init__.py
+ โ โ โโโ settings.py
+ โ โโโ core/
+ โ โ โโโ factory.py
+ โ โ โโโ logger.py
+ โ โ โโโ model.py
+ โ โ โโโ parser.py
+ โ โ โโโ preprocess.py
+ โ โ โโโ tokens.py
+ โ โโโ main.py
+ โ โโโ markdown/
+ โ โ โโโ badges.py
+ โ โ โโโ headers.py
+ โ โ โโโ quickstart.py
+ โ โ โโโ tables.py
+ โ โ โโโ template.py
+ โ โ โโโ tree.py
+ โ โโโ services/
+ โ โ โโโ version_control.py
+ โ โโโ settings/
+ โ โ โโโ config.toml
+ โ โ โโโ dependency_files.toml
+ โ โ โโโ identifiers.toml
+ โ โ โโโ ignore_files.toml
+ โ โ โโโ language_names.toml
+ โ โ โโโ language_setup.toml
+ โ โโโ utils/
+ โ โโโ utils.py
+ โโโ requirements.txt
+ โโโ scripts/
+ โ โโโ build_image.sh
+ โ โโโ build_pypi.sh
+ โ โโโ clean.sh
+ โ โโโ run.sh
+ โ โโโ run_batch.sh
+ โ โโโ test.sh
+ โโโ setup/
+ โ โโโ environment.yaml
+ โ โโโ setup.sh
+
```
---
-## ๐งฉ Modules
+
+## โ๏ธ Modules
Root
-| File | Summary |
-| --- | --- |
-| [Dockerfile](https://github.com/eli64s/readme-ai/blob/main/Dockerfile) | This Dockerfile sets up a Python environment in a container by installing Git and Python dependencies, copying project files, and running a main.py file as the entry point command. |
-| [Makefile](https://github.com/eli64s/readme-ai/blob/main/Makefile) | This makefile provides commands to facilitate code formatting, cleaning, creating conda and virtual environments, profiling code using cProfile, and analyzing the profiled output using SnakeViz. |
-| [setup.py](https://github.com/eli64s/readme-ai/blob/main/setup.py) | The provided code is a setup script for the README-AI package. It defines the required packages, author information, project details, and dependencies. It also specifies keywords, classifiers, and project URLs. |
+| File | Summary |
+| --- | --- |
+| [requirements.txt](https://github.com/eli64s/readme-ai/blob/main/requirements.txt) | The code in "requirements.txt" contains a list of Python packages and libraries needed for the project. These packages include black, click, colorlog, cachetools, flake8, gitpython, httpx, h2, isort, openai, pre-commit, pydantic, pyyaml, pytest, pytest-cov, responses, ruff, tabulate, tenacity, tiktoken, and toml. These packages provide various functionalities such as formatting, logging, version control, HTTP requests, testing, and more. |
+| [Dockerfile](https://github.com/eli64s/readme-ai/blob/main/Dockerfile) | The provided Dockerfile sets up a Docker container environment with Python 3.9 installed. It installs system dependencies, creates a non-root user, sets permissions, and adds the directory for user scripts to the PATH. It then installs the "readmeai" package from PyPI with a pinned version and sets the command to run the "readmeai" CLI. |
+| [Makefile](https://github.com/eli64s/readme-ai/blob/main/Makefile) | The Makefile provides a set of commands for repository file management and development tasks. It includes commands for cleaning up the repository files, executing code formatting and linting, installing requirements, creating a conda or virtual environment, and fixing git untracked files. These commands are executed by running the corresponding targets in the Makefile using the make command. |
+| [pyproject.toml](https://github.com/eli64s/readme-ai/blob/main/pyproject.toml) | The code is a configuration file written in TOML format for the "readmeai" Python project. It specifies the project's metadata, dependencies, scripts, and development tools. It also defines the project's dev dependencies, test configurations, and code formatting rules. |
+| [poetry.lock](https://github.com/eli64s/readme-ai/blob/main/poetry.lock) | The code consists of a directory tree containing several files and folders. It includes a Dockerfile and a Makefile for building and running the code, as well as example files in the "examples" folder. The "readmeai" folder contains various modules for different functionalities, such as CLI commands, configuration settings, core functionalities like logging and parsing, and markdown-related operations. It also includes services for version control and utility functions. The "settings" folder contains configuration files in TOML format. Additionally, there are shell scripts for various tasks like building images and running the code. |
Setup
-| File | Summary |
-| --- | --- |
-| [setup.sh](https://github.com/eli64s/readme-ai/blob/main/setup/setup.sh) | This code snippet checks if a conda environment exists, installs the "tree" command if it's not installed, ensures the presence of Git and Python 3.7+, creates a conda environment named "readme_ai" with Python 3.8 and installs required packages. |
+| File | Summary |
+| --- | --- |
+| [setup.sh](https://github.com/eli64s/readme-ai/blob/main/setup/setup.sh) | The code is a setup script for the README-AI project. It checks if the 'tree' command is installed and installs it if necessary. Then, it checks if Git and Conda are installed and exits if they're not. Next, it checks if Python 3.8 or higher is installed and exits if it's not. If the conda environment 'readmeai' does not exist, it creates it using the 'environment.yaml' file. The 'readmeai' environment is then activated, and the Python path is added to the PATH environment variable. The required packages from the 'requirements.txt' file are installed using pip. Finally, the conda environment is deactivated, and the setup is completed. |
+| [environment.yaml](https://github.com/eli64s/readme-ai/blob/main/setup/environment.yaml) | The code represents the environment setup for a project called "readmeai". It specifies the name of the project, channels to use (conda-forge and defaults), and dependencies required for the project. This includes Python version 3.9 or higher, installation of pip, and an empty pip section. |
Scripts
-| File | Summary |
-| --- | --- |
-| [run_batch.sh](https://github.com/eli64s/readme-ai/blob/main/scripts/run_batch.sh) | The code snippet is a Bash script that loops through a list of GitHub repository URLs and runs a Python script called "main.py" for each repository. The Python script takes the repository URL as input and generates a markdown file named "readme-[repository_name].md" as output. |
-| [run.sh](https://github.com/eli64s/readme-ai/blob/main/scripts/run.sh) | This code snippet is a Bash script that activates a conda environment named "readme_ai" and runs a Python script called "main.py". The script sets the options to exit on error and fail on pipe failures at the beginning. It also exports environment variables, if needed, and requires an OpenAI API key to be set. |
-| [clean.sh](https://github.com/eli64s/readme-ai/blob/main/scripts/clean.sh) | This code snippet is a Bash script that removes unwanted files and directories commonly found in a Python project. It deletes backup files, Python cache files, cache directories, VS Code settings, build artifacts, pytest cache, benchmark files, and specific files like logs and raw data. |
-| [test.sh](https://github.com/eli64s/readme-ai/blob/main/scripts/test.sh) | The provided code snippet activates a Conda environment named readme_ai. It generates a coverage report using pytest, excluding specified directories and files. It sets a minimum coverage threshold of 90%. After generating the report, it removes unnecessary files and folders. |
+| File | Summary |
+| --- | --- |
+| [run_batch.sh](https://github.com/eli64s/readme-ai/blob/main/scripts/run_batch.sh) | The code provided is a bash script that automates the generation of README files for a list of GitHub repositories. It uses the `readmeai` Python package to create Markdown files based on the repository's URL. The script iterates over a predefined list of repository URLs, extracts the repository name, and passes the URL to the `readmeai.cli.commands` module along with output file options to create a Markdown file for each repository. |
+| [build_image.sh](https://github.com/eli64s/readme-ai/blob/main/scripts/build_image.sh) | The code in the `build_image.sh` script is used to build and push a Docker image. It sets the image name and version, creates a new build context using Docker BuildKit, pulls the necessary dependencies, and builds the image for multiple platforms. Finally, it pushes the built image to a Docker registry. |
+| [build_pypi.sh](https://github.com/eli64s/readme-ai/blob/main/scripts/build_pypi.sh) | The code in the `build_pypi.sh` script performs the following core functionalities: 1. Cleans the project by running the `clean.sh` script. 2. Builds the project using the `python -m build` command. 3. Uploads the built files to the PyPI repository using `twine upload`. 4. Prints a success message indicating that the `readmeai` package has been successfully pushed to PyPI. |
+| [run.sh](https://github.com/eli64s/readme-ai/blob/main/scripts/run.sh) | The code in `scripts/run.sh` activates the `readmeai` conda environment and then runs the `readmeai` Python script using the `python3` command. The script takes arguments `-o readme-ai.md` and `-r https://github.com/eli64s/readme-ai`. It also includes commented out lines to export environment variables if needed. |
+| [clean.sh](https://github.com/eli64s/readme-ai/blob/main/scripts/clean.sh) | The `clean.sh` script is a bash script that provides several functions for cleaning different artifacts and files in a Python project. The `clean()` function removes build, test, coverage, and Python artifacts. The `clean_build()` function removes build artifacts. The `clean_pyc()` function removes Python file artifacts. The `clean_test()` function removes test and coverage artifacts. The `clean_backup_and_cache()` function removes backup files and Python cache files. The script provides a command line interface to invoke these functions based on the provided command. |
+| [test.sh](https://github.com/eli64s/readme-ai/blob/main/scripts/test.sh) | The code in `scripts/test.sh` is a bash script that performs the following tasks: 1. Activates the conda environment named "readmeai". 2. Sets the directories to include in the coverage report (`source_dir` variable). 3. Sets the directories to exclude from the coverage report (`omit_dir` variable). 4. Sets the specific file to omit from the coverage report (`omit_file` variable). 5. Runs pytest with coverage enabled, specifying the source and omit directories. 6. Generates a coverage report that shows missing lines and fails if the coverage is below 90%. 7. Removes unnecessary files and folders, such as `__pycache__`, `.pytest_cache`, `.coverage`, and any file or folder named "*local_dir*" under the `tests` directory. |
+
+
+
+
.github
+
+| File | Summary |
+| --- | --- |
+| [release-drafter.yml](https://github.com/eli64s/readme-ai/blob/main/.github/release-drafter.yml) | The code is a configuration file for the release drafter tool. It defines the template for generating release notes based on the conventions of Keep a Changelog. It sets up categories for different types of changes (features, bug fixes, etc.) and assigns corresponding labels. It provides a customizable template for the release notes, including placeholders for the version number and the changes made. It also configures a version resolver to determine the appropriate version based on the labels applied to issues or pull requests. |
+
+
+
+
Workflows
+
+| File | Summary |
+| --- | --- |
+| [release-drafter.yml](https://github.com/eli64s/readme-ai/blob/main/.github/workflows/release-drafter.yml) | This code is a GitHub Actions workflow file that automates the process of creating release drafts for a repository. It listens for push and pull request events on the "main" branch and runs the release-drafter action. The action is responsible for drafting release notes based on merged pull requests. It requires write permissions to create releases and pull requests. The workflow runs on an Ubuntu environment and uses the release-drafter@v5 action with the provided configuration name and GitHub token secret. |
+| [publish_package.yml](https://github.com/eli64s/readme-ai/blob/main/.github/workflows/publish_package.yml) | The code is a GitHub workflow that automates the process of publishing a Python package to PyPI (Python Package Index). It runs on the "main" branch and also when a new release is created. The workflow consists of several steps: 1. Checking out the code from the repository. 2. Setting up Python 3.x. 3. Installing necessary dependencies (pip, build, twine). 4. Building the package using the "python -m build" command. 5. Publishing the package to PyPI using the "python -m twine upload" command. The access credentials are provided through environment variables (TWINE_USERNAME and TWINE_PASSWORD), where the password is sourced from the PyPI API token stored in GitHub secrets. |
+| [build_image.yml](https://github.com/eli64s/readme-ai/blob/main/.github/workflows/build_image.yml) | The code in .github/workflows/build_image.yml sets up a Docker image building workflow. It triggers on pushes to the main branch and on new releases. It uses various GitHub Actions to accomplish the following steps: performing a checkout of the repository, setting up QEMU and Docker Buildx, logging in to Docker Hub, building the Docker image, and pushing it to the Docker registry with specified tags. |
+
+
+
+
Readmeai
+
+| File | Summary |
+| --- | --- |
+| [main.py](https://github.com/eli64s/readme-ai/blob/main/readmeai/main.py) | The code is the main entrypoint for an application called readme-ai. It orchestrates the generation process of a README file. It loads configuration settings, retrieves the repository tree structure, parses the codebase to get dependencies and file information, generates code summaries using OpenAI Language Models (LLMs), and builds the README.md file with headers and content based on the retrieved information. The code supports offline mode and outputs logs during the execution process. |
+
+
+
+
Settings
+
+| File | Summary |
+| --- | --- |
+| [ignore_files.toml](https://github.com/eli64s/readme-ai/blob/main/readmeai/settings/ignore_files.toml) | The code above represents the content of the `ignore_files.toml` file in the `readmeai/settings/` directory. This file is used to specify files, directories, and file extensions that should be ignored or excluded. It provides a comprehensive list of these items, including common files and directories that are often excluded in software projects. The code includes lists of ignored directories, extensions, and files, enabling users to easily customize their project's ignore list. |
+| [language_names.toml](https://github.com/eli64s/readme-ai/blob/main/readmeai/settings/language_names.toml) | The code is a configuration file that maps programming language file extensions to their corresponding names. It provides a list of file extensions and their associated programming languages, allowing for easy identification and categorization of files based on their extensions. |
+| [identifiers.toml](https://github.com/eli64s/readme-ai/blob/main/readmeai/settings/identifiers.toml) | The code provided contains a configuration file called "identifiers.toml" that defines different identifiers for various project types. The identifiers are organized into different categories such as web, mobile, desktop, backend, frontend, game, data, machine learning, library, CLI, API, plugin, and embedded. Each category contains a list of keywords that are commonly associated with projects of that type. These identifiers can be used to analyze and classify projects based on their file and directory structure. |
+| [config.toml](https://github.com/eli64s/readme-ai/blob/main/readmeai/settings/config.toml) | The code defines various settings and prompts for the readmeai project. It includes API settings, version control system URLs, CLI options, Git options, file paths, and Markdown template code. The settings determine the behavior of the readmeai project, while the prompts are structured templates for generating content. The codebase overview prompt requests a technical analysis of the project, file summaries, and core functionalities summarized in a Markdown table. |
+| [dependency_files.toml](https://github.com/eli64s/readme-ai/blob/main/readmeai/settings/dependency_files.toml) | The code provides a configuration file (`dependency_files.toml`) that specifies the dependency file names for various programming languages. These file names are used by the `readmeai` application to identify and track the dependencies of a project. The file lists a wide range of known dependency file names for different languages, such as `requirements.txt` for Python, `package.json` for JavaScript, and `build.gradle` for Java. |
+| [language_setup.toml](https://github.com/eli64s/readme-ai/blob/main/readmeai/settings/language_setup.toml) | The code represents a language setup and run instructions configuration file. It contains a mapping of programming languages to the commands needed to compile, run, and test code in each language. |
+
+
+
+
Core
+
+| File | Summary |
+| --- | --- |
+| [preprocess.py](https://github.com/eli64s/readme-ai/blob/main/readmeai/core/preprocess.py) | The code is a part of the "readmeai" project and specifically focuses on the preprocessing of an input codebase. It provides functionalities to analyze a local or remote git repository, generate a list of file information, extract dependency file contents, retrieve the file contents from a list of dictionaries, extract unique contents from the list, extract the dependencies of a user's repository, map file extensions to programming languages, and tokenize the content of each file. The code is organized into classes and functions that handle these different tasks. |
+| [tokens.py](https://github.com/eli64s/readme-ai/blob/main/readmeai/core/tokens.py) | The code in the `tokens.py` file provides utilities for handling tokenization. It includes functions to adjust the maximum number of tokens based on a specific prompt, get the number of tokens in a text string, determine the token encoder to use for the model, and truncate a text string to a maximum number of tokens. These utilities are used for preprocessing and manipulating text data in the `readmeai` project. |
+| [logger.py](https://github.com/eli64s/readme-ai/blob/main/readmeai/core/logger.py) | The `Logger` class is a custom logger implementation that allows for logging messages at different levels (info, debug, warning, error, critical). It configures the logger with a colored formatter and log colors for each level. The logger can be instantiated with a name and a logging level, and provides methods for logging messages at different levels. |
+| [factory.py](https://github.com/eli64s/readme-ai/blob/main/readmeai/core/factory.py) | The `FileHandler` class in the `factory.py` file is a factory class that handles file input and output operations. It provides methods to read and write content from/to different file types such as JSON, Markdown, TOML, TXT, and YAML. The class dynamically determines the appropriate read/write methods based on the file extension. It also includes exception handling to raise custom exceptions when file operations fail. Additionally, the class maintains a cache of file contents to improve performance when reading the same file multiple times. |
+| [model.py](https://github.com/eli64s/readme-ai/blob/main/readmeai/core/model.py) | The code represents an OpenAI API handler that generates text for the README.md file. It has functionalities to convert code to natural language text, generate text using prompts, and handle API requests to generate text. It includes methods for initializing the handler, converting code to text, generating text from prompts, handling API requests, and closing the HTTP client. |
+| [parser.py](https://github.com/eli64s/readme-ai/blob/main/readmeai/core/parser.py) | The code in `readmeai/core/parser.py` provides methods for parsing and extracting dependency file metadata. It includes functions for parsing various file types such as JSON, TOML, YAML, Docker Compose, Pipfile, Pyproject.toml, requirements.txt, Cargo.toml, etc. Each parse function takes in the content of the file and returns a list of dependencies extracted from the file. The code also includes utility functions for parsing specific fields from JSON and TOML files. |
-
Src
+Config
-| File | Summary |
-| --- | --- |
-| [preprocess.py](https://github.com/eli64s/readme-ai/blob/main/src/preprocess.py) | The provided code snippet is a module for preprocessing a codebase. It includes classes for wrapping the repository parser, analyzing local or remote git repositories, generating file information, tokenizing content, mapping file extensions to programming languages, and extracting dependency file contents. The code handles various file formats and uses pandas for data manipulation. |
-| [conf.py](https://github.com/eli64s/readme-ai/blob/main/src/conf.py) | The code snippet defines multiple data classes that store configuration constants for README-AI. It includes classes for API configuration, Git repository configuration, Markdown configuration, paths to configuration files, prompts configuration, and the main application configuration. Additionally, there is a helper class that loads and stores additional configuration settings from TOML files. Overall, the code organizes and manages configuration constants necessary for README-AI. |
-| [logger.py](https://github.com/eli64s/readme-ai/blob/main/src/logger.py) | The provided code snippet defines a custom logger class that wraps the functionality of the logging module. It supports multiple log levels, such as info, debug, warning, error, and critical, and uses a colored formatter for log messages. The class ensures that only one instance of the logger is created and allows logging to be configured with different log levels. |
-| [factory.py](https://github.com/eli64s/readme-ai/blob/main/src/factory.py) | This code snippet provides a factory class that handles file input/output operations. It supports reading and writing different file types such as Markdown, TOML, JSON, and YAML. It offers methods to read and write files, and it also handles caching to improve performance. It raises specific exceptions when there are errors in file operations. |
-| [model.py](https://github.com/eli64s/readme-ai/blob/main/src/model.py) | The provided code snippet is a handler for making requests to the OpenAI API to generate text for the README.md file. It includes functionality for converting code to natural language text and generating text using prompts. The code also handles rate limiting, caching, and error handling. |
-| [builder.py](https://github.com/eli64s/readme-ai/blob/main/src/builder.py) | The code snippet builds a README Markdown file for a codebase. It creates different sections including badges, directory tree, modules, setup guide, and more. It uses information from the configuration file and generates Markdown tables from code summaries. It also retrieves and formats badges for project dependencies. |
-| [utils.py](https://github.com/eli64s/readme-ai/blob/main/src/utils.py) | This code snippet provides utility methods for the README-AI tool. It includes functions for cloning and validating Git repositories, extracting username and repository name from GitHub URLs, adjusting maximum tokens for prompts, token counting, text truncation, file/url validation, list flattening, and text formatting. |
-| [parse.py](https://github.com/eli64s/readme-ai/blob/main/src/parse.py) | The provided code snippet consists of a collection of methods that parse different file formats to extract dependencies. This includes files such as docker-compose.yaml, conda environment files, Pipfile, Pipfile.lock, pyproject.toml, requirements.txt, Cargo.toml, Cargo.lock, package.json, yarn.lock, package-lock.json, go.mod, build.gradle, pom.xml, CMakeLists.txt, configure.ac, and Makefile.am. The methods take the content of the files as input and return a list of dependencies extracted from the files. Each method handles the parsing logic for a specific file format. |
-| [main.py](https://github.com/eli64s/readme-ai/blob/main/src/main.py) | This code snippet is the main entrypoint for the README-AI application. It takes command line arguments for the OpenAI API key, output file path, and repository URL or path. It validates the API key and repository, then it generates a README.md file by orchestrating the generation process using an OpenAI model. The generated README contains a code summary, slogan, overview, and features. Finally, it formats the text and builds the markdown file. |
+| File | Summary |
+| --- | --- |
+| [__Init__.py](https://github.com/eli64s/readme-ai/blob/main/readmeai/config/__Init__.py) | The code in `readmeai/config/__Init__.py` serves as the initialization file for the `config` module in the `readmeai` directory. It includes the necessary code to initialize and configure settings related to the project, such as importing the `settings.py` module. |
+| [settings.py](https://github.com/eli64s/readme-ai/blob/main/readmeai/config/settings.py) | The code defines Pydantic models for the README AI application's configuration settings. It includes models for API configuration, CLI options, Git repository configuration, Markdown code block templates, file paths, prompts, and the overall application configuration. The code also includes a helper class to load additional configuration files. There are functions to load the main configuration and the configuration helper. |
+
+
+
+Markdown
+
+| File | Summary |
+| --- | --- |
+| [tree.py](https://github.com/eli64s/readme-ai/blob/main/readmeai/markdown/tree.py) | The code generates a tree structure for a given directory. It includes a class called `TreeGenerator`, which takes in a root directory, repository URL, project name, and optional maximum depth to generate the tree. The `generate_and_format_tree` method generates and formats the tree structure using the `_generate_tree` and `_format_tree` methods. The `_generate_tree` method recursively traverses the directory, adding directory and file names to the tree structure, while the `_format_tree` method formats the final tree structure with the project name. |
+| [badges.py](https://github.com/eli64s/readme-ai/blob/main/readmeai/markdown/badges.py) | The code in the `badges.py` file generates badges for the README file. It uses icons from the Shields.io and app iOS styles to create the badges. The code reads the icons from resource files, filters them based on the project's dependencies, and formats them into Markdown image tags. The badges are sorted by color and displayed in multiple lines if there are more than 8 badges. The generated badges are returned as a string. |
+| [template.py](https://github.com/eli64s/readme-ai/blob/main/readmeai/markdown/template.py) | The code defines a directory tree structure for a project called "readmeai". It includes various files and directories related to the project. The `template.py` file defines classes for generating a readme file based on different project types. The `ReadmeTemplate` abstract class provides a blueprint for subclasses to implement. The `LibraryTemplate` and `WebTemplate` subclasses override certain sections of the readme template. The `Project` class holds information about the project. The `gen_readme` function generates a readme file based on the project type. Finally, an example is provided to demonstrate the usage of the code. |
+| [tables.py](https://github.com/eli64s/readme-ai/blob/main/readmeai/markdown/tables.py) | The code contains functions that create Markdown tables used to format the code summaries generated by the LLM (Language Model) tool. The `create_markdown_tables` function formats the code summaries into a list, while the `create_tables` function creates Markdown tables for each sub-directory in the project. The code also includes utility functions to create and format the Markdown tables. Finally, the `build_recursive_tables` function recursively builds a Markdown table structure for a given directory. |
+| [headers.py](https://github.com/eli64s/readme-ai/blob/main/readmeai/markdown/headers.py) | This code builds the README Markdown file for a codebase. It constructs various sections of the README by formatting the contents for each section. It creates badges, tables, and a setup guide, and removes emojis from headers and the Table of Contents if specified. The generated README file is written to the output path specified in the configuration. |
+| [quickstart.py](https://github.com/eli64s/readme-ai/blob/main/readmeai/markdown/quickstart.py) | The code in the `quickstart.py` file is responsible for creating the 'Getting Started' section of a README file. It uses the provided configuration and helper objects to determine the default installation, run, and test commands. It also analyzes the summaries list to determine the most frequently used programming language and retrieve the corresponding setup guide. If an error occurs, it falls back to using the default run command. The function then returns the default installation, run, and test commands as a tuple. |
+
+
+
+Utils
+
+| File | Summary |
+| --- | --- |
+| [utils.py](https://github.com/eli64s/readme-ai/blob/main/readmeai/utils/utils.py) | The code in utils.py provides utility methods for the readme-ai application. It includes functions such as filtering out files to ignore, checking if a string is a valid URL, flattening a nested list, formatting text, and removing text between HTML tags. The code uses regular expressions and the pathlib library for file handling. |
+
+
+
+Cli
+
+| File | Summary |
+| --- | --- |
+| [options.py](https://github.com/eli64s/readme-ai/blob/main/readmeai/cli/options.py) | The code defines various options for the command line interface of a program called "readmeai". These options include API key, badges, emojis, model, offline mode, output file path, repository, temperature, language, and style. These options can be used to customize the generation of a README.md file using the "readmeai" program. |
+| [commands.py](https://github.com/eli64s/readme-ai/blob/main/readmeai/cli/commands.py) | The code in the file `commands.py` defines the CLI commands for a tool called `readme-ai`. It uses the `click` library to create command-line options for the tool. The main function `commands` accepts several optional arguments and invokes the `main` function from another module. The purpose of the `readme-ai` tool is to generate a README file for a GitHub repository based on various options specified by the user. |
+
+
+
+Services
+
+| File | Summary |
+| --- | --- |
+| [version_control.py](https://github.com/eli64s/readme-ai/blob/main/readmeai/services/version_control.py) | The code in `version_control.py` provides Git-related utilities for the `readme-ai` project. It includes functions for making HTTP requests to retrieve metadata about a GitHub, GitLab, or Bitbucket repository, cloning a repository to a temporary directory, finding the path to the git executable, validating the file permissions of a cloned repository, getting the file URL for a given file based on the platform, extracting the user and repository name from a URL or path, and parsing the repository URL to construct the API URL. |
@@ -215,14 +275,17 @@ repo
## ๐ Getting Started
-### โ๏ธ Prerequisites
+***Dependencies***
+
+Please ensure you have the following dependencies installed on your system:
+
+`- โน๏ธ Dependency 1`
+
+`- โน๏ธ Dependency 2`
-Before you begin, ensure that you have the following prerequisites installed:
-> - `โน๏ธ Requirement 1`
-> - `โน๏ธ Requirement 2`
-> - `โน๏ธ ...`
+`- โน๏ธ ...`
-### ๐ป Installation
+### ๐ง Installation
1. Clone the readme-ai repository:
```sh
@@ -239,13 +302,13 @@ cd readme-ai
pip install -r requirements.txt
```
-### ๐ฎ Using readme-ai
+### ๐ค Running readme-ai
```sh
python main.py
```
-### ๐งช Running Tests
+### ๐งช Tests
```sh
pytest
```
@@ -253,10 +316,10 @@ pytest
---
-## ๐บ Roadmap
+## ๐ฃ Project Roadmap
> - [X] `โน๏ธ Task 1: Implement X`
-> - [ ] `โน๏ธ Task 2: Refactor Y`
+> - [ ] `โน๏ธ Task 2: Implement Y`
> - [ ] `โน๏ธ ...`
@@ -264,35 +327,47 @@ pytest
## ๐ค Contributing
-Contributions are always welcome! Please follow these steps:
-1. Fork the project repository. This creates a copy of the project on your account that you can modify without affecting the original project.
+[**Discussions**](https://github.com/eli64s/readme-ai/discussions)
+ - Join the discussion here.
+
+[**New Issue**](https://github.com/eli64s/readme-ai/issues)
+ - Report a bug or request a feature here.
+
+[**Contributing Guidelines**](https://github.com/eli64s/readme-ai/blob/main/CONTRIBUTING.md)
+
+- Contributions are welcome! Please follow these steps:
+
+1. Fork the project repository to your GitHub account.
2. Clone the forked repository to your local machine using a Git client like Git or GitHub Desktop.
-3. Create a new branch with a descriptive name (e.g., `new-feature-branch` or `bugfix-issue-123`).
+3. Create a new branch with a descriptive name such as `new-feature-x` or `bugfix-issue-x`.
```sh
-git checkout -b new-feature-branch
+git checkout -b new-feature-x
```
-4. Make changes to the project's codebase.
-5. Commit your changes to your local branch with a clear commit message that explains the changes you've made.
+4. Develop your changes locally.
+5. Commit your updates with a clear explanation of the changes you've made.
```sh
git commit -m 'Implemented new feature.'
```
-6. Push your changes to your forked repository on GitHub using the following command
+6. Push your changes to your forked repository on GitHub.
```sh
-git push origin new-feature-branch
+git push origin new-feature-x
```
7. Create a new pull request to the original project repository. In the pull request, describe the changes you've made and why they're necessary.
-The project maintainers will review your changes and provide feedback or merge them into the main branch.
+8. Once your pull request is reviewed, it will be merged into the main branch of the project repository.
---
## ๐ License
-This project is licensed under the `โน๏ธ INSERT-LICENSE-TYPE` License. See the [LICENSE](https://docs.github.com/en/communities/setting-up-your-project-for-healthy-contributions/adding-a-license-to-a-repository) file for additional info.
+
+This project is protected under the [SELECT-A-LICENSE](https://choosealicense.com/licenses) License. For more details, refer to the [LICENSE](https://choosealicense.com/licenses/) file.
---
## ๐ Acknowledgments
-> - `โน๏ธ List any resources, contributors, inspiration, etc.`
+- List any resources, contributors, inspiration, etc. here.
+
+[**Return**](#Top)
---
diff --git a/pyproject.toml b/pyproject.toml
index 8e279012..bd9cf422 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "poetry.core.masonry.api"
[tool.poetry]
name = "readmeai"
-version = "0.4.036"
+version = "0.4.037"
description = "Generate beautiful README.md files from the terminal, powered by AI."
authors = ["Eli <0x.eli.64s@gmail.com>"]
license = "MIT"
diff --git a/readmeai/config/settings.py b/readmeai/config/settings.py
index e3709125..fb111421 100644
--- a/readmeai/config/settings.py
+++ b/readmeai/config/settings.py
@@ -126,10 +126,10 @@ class PathsConfig(BaseModel):
class PromptsConfig(BaseModel):
"""Pydantic model for OpenAI prompts."""
- code_summary: str
features: str
overview: str
slogan: str
+ summaries: str
class AppConfig(BaseModel):
diff --git a/readmeai/core/model.py b/readmeai/core/model.py
index 7eb87d8f..e48f6e5a 100644
--- a/readmeai/core/model.py
+++ b/readmeai/core/model.py
@@ -57,16 +57,20 @@ def __init__(self, config: settings.AppConfig):
self.rate_limit_semaphore = asyncio.Semaphore(self.rate_limit)
async def code_to_text(
- self, ignore: dict, files: Dict[str, str], prompt: str
+ self,
+ files: Dict[str, str],
+ ignore: Dict[str, List[str]],
+ prompt: str,
+ tree: str,
) -> Dict[str, str]:
"""Converts code to natural language text using large language models.
Parameters
----------
- ignore : dict
- Files, directories, or file extensions to ignore.
files : Dict[str, str]
The repository files to convert to text.
+ ignore : Dict[str, List[str]]
+ Files, directories, or file extensions to ignore.
prompt : str
The prompt to use for the OpenAI API calls.
@@ -88,7 +92,7 @@ async def code_to_text(
self.logger.warning(f"Ignoring file: {path}")
continue
- prompt_code = prompt.format(str(path), contents)
+ prompt_code = prompt.format(tree, str(path), contents)
tasks.append(
asyncio.create_task(
self.generate_text(path, prompt_code, self.tokens)
@@ -160,7 +164,10 @@ async def generate_text(
try:
token_count = get_token_count(prompt, self.encoding)
- if token_count > tokens:
+ if token_count > self.tokens_max:
+ self.logger.warning(
+ f"Truncating tokens: {token_count} > {self.tokens_max}"
+ )
prompt = truncate_tokens(prompt, tokens)
async with self.rate_limit_semaphore:
diff --git a/readmeai/core/tokens.py b/readmeai/core/tokens.py
index cd92c997..03a72a60 100644
--- a/readmeai/core/tokens.py
+++ b/readmeai/core/tokens.py
@@ -1,4 +1,4 @@
-"""Utilities for handling tokennization."""
+"""Utilities for handling language tokens."""
from tiktoken import encoding_for_model, get_encoding
diff --git a/readmeai/main.py b/readmeai/main.py
index 6675e4ea..e234ef1f 100644
--- a/readmeai/main.py
+++ b/readmeai/main.py
@@ -7,8 +7,6 @@
import asyncio
import traceback
-import requests
-
from readmeai.config.settings import (
AppConfig,
AppConfigModel,
@@ -20,6 +18,7 @@
from readmeai.core import logger, model, preprocess
from readmeai.markdown import headers, tables, tree
from readmeai.services import version_control as vcs
+from readmeai.utils import utils
logger = logger.Logger(__name__)
@@ -80,23 +79,25 @@ async def readme_agent(conf: AppConfig, conf_helper: ConfigHelper) -> None:
parser = preprocess.RepositoryParser(conf, conf_helper)
dependencies, files = parser.get_dependencies(temp_dir)
logger.info(f"Dependencies: {dependencies}")
- logger.info(f"Files: {files}")
# Generate codebase file summaries and README.md text via LLMs.
if conf.cli.offline is False:
code_summary = await llm.code_to_text(
- conf_helper.ignore_files,
files,
- conf.prompts.code_summary,
+ conf_helper.ignore_files,
+ conf.prompts.summaries,
+ tree_str,
)
- logger.info(f"Code summaries returned:\n{code_summary[:5]}")
prompts = [
conf.prompts.slogan.format(conf.git.name),
- conf.prompts.overview.format(repository, code_summary),
- conf.prompts.features.format(repository, tree),
+ conf.prompts.overview.format(
+ repository, tree_str, dependencies, code_summary
+ ),
+ conf.prompts.features.format(
+ repository, tree_str, dependencies, code_summary
+ ),
]
slogan, overview, features = await llm.chat_to_text(prompts)
-
else:
conf.md.tables = tables.build_recursive_tables(
repository, temp_dir, placeholder
diff --git a/readmeai/settings/config.toml b/readmeai/settings/config.toml
index 241b6021..5f2d4380 100644
--- a/readmeai/settings/config.toml
+++ b/readmeai/settings/config.toml
@@ -4,9 +4,9 @@ endpoint = "https://api.openai.com/v1/chat/completions"
encoding = "cl100k_base"
model = "gpt-3.5-turbo"
rate_limit = 3
-tokens = 650
-tokens_max = 3800
-temperature = 0.9
+tokens = 750
+tokens_max = 4000
+temperature = 1.0
# Version Control Systems
[base_urls]
@@ -38,12 +38,7 @@ output = "readme-ai.md"
# Prompts
[prompts]
-code_summary = """Offer a comprehensive summary that encapsulates the core functionalities of the code:
-\nPath: {0}\nContents:\n{1}\n Aim for precision and conciseness in your explanation, ensuring a fine balance between detail and brevity.
-Limit your response to a maximum of 225 characters (including spaces).
-"""
-features = """Hello! Analyze the Git codebase {} and create a robust summary of the project's features.
-The following information summarizes each file in the repository to help you get started: \n{}\n
+features = """Hello! Analyze the repository {0} and follow the instructions below to generate a comprehensive list of features.
Please provide a comprehensive technical analysis of the codebase and its components.
Consider the codebase as a whole and highlight the key characteristics, design patterns, architectural decisions, and any other noteworthy elements.
Generate your response as a Markdown table with the following columns:
@@ -55,21 +50,29 @@ Generate your response as a Markdown table with the following columns:
| ๐ | **Dependencies** | Examine the external libraries or other systems that this system relies on here. Limit your response to a maximum of 200 characters.|
| ๐งฉ | **Modularity** | Discuss the system's organization into smaller, interchangeable components here. Limit your response to a maximum of 200 characters.|
| ๐งช | **Testing** | Evaluate the system's testing strategies and tools here. Limit your response to a maximum of 200 characters. |
-| โก๏ธ | **Performance** | Analyze how well the system performs, considering speed, efficiency, and resource usage here. Limit your response to a maximum of 200 characters.|
+| โก๏ธ | **Performance** | Analyze how well the system performs, considering speed, efficiency, and resource usage here. Limit your response to a maximum of 200 characters.|
| ๐ | **Security** | Assess the measures the system uses to protect data and maintain functionality here. Limit your response to a maximum of 200 characters.|
| ๐ | **Version Control**| Discuss the system's version control strategies and tools here. Limit your response to a maximum of 200 characters.|
| ๐ | **Integrations** | Evaluate how the system interacts with other systems and services here. Limit your response to a maximum of 200 characters.|
| ๐ถ | **Scalability** | Analyze the system's ability to handle growth here. Limit your response to a maximum of 200 characters. |
-Thank you for your time and effort!
+Repository Details:
+\nDirectory Tree: {1}\nDependencies: {2}\nCode Summaries: {3}\n
+"""
+overview = """Generate a <=100 word summary that describes the capabilities of the repository {0}.
+Focus on the project's use-case and value proposition, not its technical details.
+Do not refer to the project using the URL provided. Below are more details of the
+project so you can gain a deep understanding of the codebase and its components.
+Repository Details:
+\nDirectory Tree: {1}\nDependencies: {2}\nCode Summaries: {3}\n
"""
-overview = """Please analyze the codebase located at {} and provide a robust, yet succinct overview of the rpoject.
-The following includes a list of the summaries of the files in the repository: \n{}\n
-Craft 3-4 sentences that encapsulate the core functionalities of the project, its purpose, and its value proposition.
+slogan = "Conceptualize a catchy and memorable slogan for the GitHub project: {0}. Limit your response to 80 characters."
+summaries = """Offer a comprehensive summary <= 80 words that encapsulates the core functionalities of the code below.
+Aim for precision and conciseness in your explanation, ensuring a fine balance between detail and brevity.
+\nDirectory Tree: {0}\nPath: {1}\nCode:\n{2}\n
"""
-slogan = "Conceptualize a catchy and memorable slogan for the GitHub project: {}. Limit your response to 80 characters."
-# Markdown Template Code
+# Markdown Templates
[md]
tables = ""
default = "โบ INSERT-TEXT"
diff --git a/readmeai/settings/ignore_files.toml b/readmeai/settings/ignore_files.toml
index 9e92d7de..193b4bd3 100644
--- a/readmeai/settings/ignore_files.toml
+++ b/readmeai/settings/ignore_files.toml
@@ -139,4 +139,5 @@ files = [
"__init__.py",
"start",
"test_binary",
+ "mkdocs.yml",
]
diff --git a/readmeai/utils/utils.py b/readmeai/utils/utils.py
index 1be6b951..651083fc 100644
--- a/readmeai/utils/utils.py
+++ b/readmeai/utils/utils.py
@@ -12,17 +12,21 @@
def should_ignore(conf_helper: ConfigHelper, file_path: Path) -> bool:
"""Filters out files that should be ignored."""
- for directory in conf_helper.ignore_files["directories"]:
- if directory in file_path.parts:
- logger.debug(f"Ignoring directory: {file_path}")
- return True
+ ignore_files = conf_helper.ignore_files
- if file_path.name in conf_helper.ignore_files["files"]:
- logger.debug(f"Ignoring file: {file_path}")
+ if any(
+ directory in file_path.parts
+ for directory in ignore_files["directories"]
+ ):
+ logger.debug(f"Ignoring directory: {file_path.name}")
return True
- if file_path.suffix[1:] in conf_helper.ignore_files["extensions"]:
- logger.debug(f"Ignoring extension: {file_path}")
+ if file_path.name in ignore_files["files"]:
+ logger.debug(f"Ignoring file: {file_path.name}")
+ return True
+
+ if file_path.suffix.lstrip(".") in ignore_files["extensions"]:
+ logger.debug(f"Ignoring extension: {file_path.name}")
return True
return False