diff --git a/.forgejo/workflows/ci.yml b/.forgejo/workflows/ci.yml
index 807719a..0e7439b 100644
--- a/.forgejo/workflows/ci.yml
+++ b/.forgejo/workflows/ci.yml
@@ -30,3 +30,16 @@ jobs:
run: |
./tests.sh
+ # Notify if any previous step in this job failed
+ - name: Notify on failure
+ if: ${{ failure() }}
+ env:
+ WEBHOOK_URL: ${{ secrets.NODERED_WEBHOOK_URL }}
+        REPOSITORY: ${{ github.repository }}
+        RUN_NUMBER: ${{ github.run_number }}
+        SERVER_URL: ${{ github.server_url }}
+ run: |
+ curl -X POST \
+ -H "Content-Type: application/json" \
+ -d "{\"repository\":\"$REPOSITORY\",\"run_number\":\"$RUN_NUMBER\",\"status\":\"failure\",\"url\":\"$SERVER_URL/$REPOSITORY/actions/runs/$RUN_NUMBER\"}" \
+ "$WEBHOOK_URL"
diff --git a/.forgejo/workflows/lint.yml b/.forgejo/workflows/lint.yml
index 60768d8..a8ba06d 100644
--- a/.forgejo/workflows/lint.yml
+++ b/.forgejo/workflows/lint.yml
@@ -15,7 +15,7 @@ jobs:
run: |
apt-get update
DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
- black pyflakes3 python3-bandit
+ black pyflakes3 python3-bandit vulture
- name: Run linters
run: |
@@ -24,3 +24,18 @@ jobs:
pyflakes3 src/*
pyflakes3 tests/*
bandit -s B110 -r src/
+ vulture .
+
+ # Notify if any previous step in this job failed
+ - name: Notify on failure
+ if: ${{ failure() }}
+ env:
+ WEBHOOK_URL: ${{ secrets.NODERED_WEBHOOK_URL }}
+        REPOSITORY: ${{ github.repository }}
+        RUN_NUMBER: ${{ github.run_number }}
+        SERVER_URL: ${{ github.server_url }}
+ run: |
+ curl -X POST \
+ -H "Content-Type: application/json" \
+ -d "{\"repository\":\"$REPOSITORY\",\"run_number\":\"$RUN_NUMBER\",\"status\":\"failure\",\"url\":\"$SERVER_URL/$REPOSITORY/actions/runs/$RUN_NUMBER\"}" \
+ "$WEBHOOK_URL"
diff --git a/.forgejo/workflows/trivy.yml b/.forgejo/workflows/trivy.yml
index 18ced32..fad2f6f 100644
--- a/.forgejo/workflows/trivy.yml
+++ b/.forgejo/workflows/trivy.yml
@@ -24,3 +24,17 @@ jobs:
- name: Run trivy
run: |
trivy fs --no-progress --ignore-unfixed --format table --disable-telemetry .
+
+ # Notify if any previous step in this job failed
+ - name: Notify on failure
+ if: ${{ failure() }}
+ env:
+ WEBHOOK_URL: ${{ secrets.NODERED_WEBHOOK_URL }}
+        REPOSITORY: ${{ github.repository }}
+        RUN_NUMBER: ${{ github.run_number }}
+        SERVER_URL: ${{ github.server_url }}
+ run: |
+ curl -X POST \
+ -H "Content-Type: application/json" \
+ -d "{\"repository\":\"$REPOSITORY\",\"run_number\":\"$RUN_NUMBER\",\"status\":\"failure\",\"url\":\"$SERVER_URL/$REPOSITORY/actions/runs/$RUN_NUMBER\"}" \
+ "$WEBHOOK_URL"
diff --git a/.gitignore b/.gitignore
index 7bc15a0..dedc5da 100644
--- a/.gitignore
+++ b/.gitignore
@@ -7,3 +7,4 @@ dist
*.yml
*.j2
*.toml
+regenerated_*
diff --git a/Dockerfile.debbuild b/Dockerfile.debbuild
new file mode 100644
index 0000000..8d185b1
--- /dev/null
+++ b/Dockerfile.debbuild
@@ -0,0 +1,85 @@
+# syntax=docker/dockerfile:1
+ARG BASE_IMAGE=debian:bookworm
+FROM ${BASE_IMAGE}
+
+ENV DEBIAN_FRONTEND=noninteractive
+
+# If Ubuntu, ensure Universe is enabled.
+RUN set -eux; \
+ . /etc/os-release; \
+ if [ "${ID:-}" = "ubuntu" ]; then \
+ apt-get update; \
+ apt-get install -y --no-install-recommends software-properties-common ca-certificates; \
+ add-apt-repository -y universe; \
+ fi; \
+ if [ "${VERSION_CODENAME:-}" = "jammy" ]; then \
+ apt-get update; \
+ apt-get install -y --no-install-recommends python3-tomli; \
+ fi
+
+# Build deps
+RUN set -eux; \
+ apt-get update; \
+ apt-get install -y --no-install-recommends \
+ build-essential \
+ devscripts \
+ debhelper \
+ dh-python \
+ pybuild-plugin-pyproject \
+ python3-all \
+ python3-poetry-core \
+ python3-yaml \
+ python3-defusedxml \
+ python3-jinja2 \
+ python3-toml \
+ rsync \
+ ca-certificates \
+ ; \
+ rm -rf /var/lib/apt/lists/*
+
+# Build runner script
+RUN set -eux; \
+ cat > /usr/local/bin/build-deb <<'EOF'
+#!/usr/bin/env bash
+set -euo pipefail
+
+SRC="${SRC:-/src}"
+WORKROOT="${WORKROOT:-/work}"
+WORK="${WORKROOT}/src"
+OUT="${OUT:-/out}"
+
+mkdir -p "$WORK" "$OUT"
+
+rsync -a --delete \
+ --exclude '.git' \
+ --exclude '.venv' \
+ --exclude 'dist' \
+ --exclude 'build' \
+ --exclude '__pycache__' \
+ --exclude '.pytest_cache' \
+ --exclude '.mypy_cache' \
+ "${SRC}/" "${WORK}/"
+
+cd "${WORK}"
+if [ -n "${SUITE:-}" ]; then
+ export DEBEMAIL="mig@mig5.net"
+ export DEBFULLNAME="Miguel Jacq"
+
+ dch --distribution "$SUITE" --local "~${SUITE}" "CI build for $SUITE"
+fi
+dpkg-buildpackage -us -uc -b
+
+shopt -s nullglob
+cp -v "${WORKROOT}"/*.deb \
+ "${WORKROOT}"/*.changes \
+ "${WORKROOT}"/*.buildinfo \
+ "${WORKROOT}"/*.dsc \
+ "${WORKROOT}"/*.tar.* \
+ "${OUT}/" || true
+
+echo "Artifacts copied to ${OUT}"
+EOF
+RUN chmod +x /usr/local/bin/build-deb
+
+WORKDIR /work
+ENTRYPOINT ["/usr/local/bin/build-deb"]
diff --git a/README.md b/README.md
index 7c0d59e..80763f3 100644
--- a/README.md
+++ b/README.md
@@ -1,8 +1,11 @@
# JinjaTurtle
+
+

+
+
JinjaTurtle is a command-line tool to help you generate Jinja2 templates and
-Ansible `defaults/main.yml` files from a native configuration file of a piece
-of software.
+Ansible inventory from a native configuration file of a piece of software.
## How it works
@@ -13,23 +16,40 @@ of software.
role.
* A Jinja2 file is generated from the file with those parameter key names
injected as the `{{ variable }}` names.
- * A `defaults/main.yml` is generated with those key names and the *values*
- taken from the original config file as the defaults.
+ * An Ansible inventory YAML file is generated with those key names and the
+ *values* taken from the original config file as the defaults.
-By default, the Jinja2 template and the `defaults/main.yml` are printed to
+By default, the Jinja2 template and the Ansible inventory are printed to
stdout. However, it is possible to output the results to new files.
## What sort of config files can it handle?
-TOML, YAML, INI and JSON style config files should be okay. There are always
+TOML, YAML, INI, JSON and XML-style config files should be okay. There are always
going to be some edge cases in very complex files that are difficult to work
with, though, so you may still find that you need to tweak the results.
+For XML and YAML files, JinjaTurtle will attempt to generate 'for' loops
+and lists in the Ansible yaml if the config file looks homogeneous enough to
+support it. However, if it is not confident about this, it will fall back to
+using scalar-style flattened attributes.
+
+You may need or wish to tidy up the config to suit your needs.
+
The goal here is really to *speed up* converting files into Ansible/Jinja2,
but not necessarily to make it perfect.
## How to install it
+### Ubuntu/Debian apt repository
+
+```bash
+sudo mkdir -p /usr/share/keyrings
+curl -fsSL https://mig5.net/static/mig5.asc | sudo gpg --dearmor -o /usr/share/keyrings/mig5.gpg
+echo "deb [arch=amd64 signed-by=/usr/share/keyrings/mig5.gpg] https://apt.mig5.net $(lsb_release -cs) main" | sudo tee /etc/apt/sources.list.d/mig5.list
+sudo apt update
+sudo apt install jinjaturtle
+```
+
### From PyPi
```
@@ -64,9 +84,9 @@ jinjaturtle php.ini \
## Full usage info
```
-usage: jinjaturtle [-h] -r ROLE_NAME [-f {ini,toml}] [-d DEFAULTS_OUTPUT] [-t TEMPLATE_OUTPUT] config
+usage: jinjaturtle [-h] -r ROLE_NAME [-f {json,ini,toml,yaml,xml}] [-d DEFAULTS_OUTPUT] [-t TEMPLATE_OUTPUT] config
-Convert a config file into an Ansible defaults file and Jinja2 template.
+Convert a config file into Ansible inventory and a Jinja2 template.
positional arguments:
config Path to the source configuration file (TOML or INI-style).
@@ -75,7 +95,7 @@ options:
-h, --help show this help message and exit
-r, --role-name ROLE_NAME
Ansible role name, used as variable prefix (e.g. cometbft).
- -f, --format {ini,toml}
+  -f, --format {ini,json,toml,xml,yaml}
Force config format instead of auto-detecting from filename.
-d, --defaults-output DEFAULTS_OUTPUT
Path to write defaults/main.yml. If omitted, defaults YAML is printed to stdout.
diff --git a/debian/changelog b/debian/changelog
new file mode 100644
index 0000000..9db1779
--- /dev/null
+++ b/debian/changelog
@@ -0,0 +1,11 @@
+jinjaturtle (0.3.3) unstable; urgency=medium
+
+ * Fixes for tomli on Ubuntu 22
+
+ -- Miguel Jacq <mig@mig5.net>  Mon, 15 Dec 2025 14:00:00 +0000
+
+jinjaturtle (0.3.2) unstable; urgency=medium
+
+ * Initial package
+
+ -- Miguel Jacq <mig@mig5.net>  Mon, 15 Dec 2025 12:00:00 +0000
diff --git a/debian/control b/debian/control
new file mode 100644
index 0000000..72a7e21
--- /dev/null
+++ b/debian/control
@@ -0,0 +1,28 @@
+Source: jinjaturtle
+Section: admin
+Priority: optional
+Maintainer: Miguel Jacq <mig@mig5.net>
+Rules-Requires-Root: no
+Build-Depends:
+ debhelper-compat (= 13),
+ dh-python,
+ pybuild-plugin-pyproject,
+ python3-all,
+ python3-poetry-core,
+ python3-yaml,
+ python3-toml,
+ python3-defusedxml,
+ python3-jinja2
+Standards-Version: 4.6.2
+Homepage: https://git.mig5.net/mig5/jinjaturtle
+
+Package: jinjaturtle
+Architecture: all
+Depends:
+ ${misc:Depends},
+ ${python3:Depends},
+ python3-yaml,
+ python3-toml,
+ python3-defusedxml,
+ python3-jinja2
+Description: Convert config files into Ansible defaults and Jinja2 templates
diff --git a/debian/rules b/debian/rules
new file mode 100755
index 0000000..4c26136
--- /dev/null
+++ b/debian/rules
@@ -0,0 +1,6 @@
+#!/usr/bin/make -f
+export PYBUILD_NAME=jinjaturtle
+export PYBUILD_SYSTEM=pyproject
+
+%:
+ dh $@ --with python3 --buildsystem=pybuild
diff --git a/debian/source/format b/debian/source/format
new file mode 100644
index 0000000..163aaf8
--- /dev/null
+++ b/debian/source/format
@@ -0,0 +1 @@
+3.0 (quilt)
diff --git a/debian/source/options b/debian/source/options
new file mode 100644
index 0000000..c32a8c1
--- /dev/null
+++ b/debian/source/options
@@ -0,0 +1,6 @@
+tar-ignore = ".git"
+tar-ignore = ".venv"
+tar-ignore = "__pycache__"
+tar-ignore = ".pytest_cache"
+tar-ignore = "dist"
+tar-ignore = "build"
diff --git a/jinjaturtle.svg b/jinjaturtle.svg
new file mode 100644
index 0000000..2e6fcf2
--- /dev/null
+++ b/jinjaturtle.svg
@@ -0,0 +1,59 @@
+
diff --git a/poetry.lock b/poetry.lock
index 8891448..0d40c6c 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -1,42 +1,5 @@
# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand.
-[[package]]
-name = "backports-tarfile"
-version = "1.2.0"
-description = "Backport of CPython tarfile module"
-optional = false
-python-versions = ">=3.8"
-files = [
- {file = "backports.tarfile-1.2.0-py3-none-any.whl", hash = "sha256:77e284d754527b01fb1e6fa8a1afe577858ebe4e9dad8919e34c862cb399bc34"},
- {file = "backports_tarfile-1.2.0.tar.gz", hash = "sha256:d75e02c268746e1b8144c278978b6e98e85de6ad16f8e4b0844a154557eca991"},
-]
-
-[package.extras]
-docs = ["furo", "jaraco.packaging (>=9.3)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"]
-testing = ["jaraco.test", "pytest (!=8.0.*)", "pytest (>=6,!=8.1.*)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)"]
-
-[[package]]
-name = "build"
-version = "1.3.0"
-description = "A simple, correct Python build frontend"
-optional = false
-python-versions = ">=3.9"
-files = [
- {file = "build-1.3.0-py3-none-any.whl", hash = "sha256:7145f0b5061ba90a1500d60bd1b13ca0a8a4cebdd0cc16ed8adf1c0e739f43b4"},
- {file = "build-1.3.0.tar.gz", hash = "sha256:698edd0ea270bde950f53aed21f3a0135672206f3911e0176261a31e0e07b397"},
-]
-
-[package.dependencies]
-colorama = {version = "*", markers = "os_name == \"nt\""}
-importlib-metadata = {version = ">=4.6", markers = "python_full_version < \"3.10.2\""}
-packaging = ">=19.1"
-pyproject_hooks = "*"
-tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""}
-
-[package.extras]
-uv = ["uv (>=0.1.18)"]
-virtualenv = ["virtualenv (>=20.11)", "virtualenv (>=20.17)", "virtualenv (>=20.31)"]
-
[[package]]
name = "certifi"
version = "2025.11.12"
@@ -48,102 +11,6 @@ files = [
{file = "certifi-2025.11.12.tar.gz", hash = "sha256:d8ab5478f2ecd78af242878415affce761ca6bc54a22a27e026d7c25357c3316"},
]
-[[package]]
-name = "cffi"
-version = "2.0.0"
-description = "Foreign Function Interface for Python calling C code."
-optional = false
-python-versions = ">=3.9"
-files = [
- {file = "cffi-2.0.0-cp310-cp310-macosx_10_13_x86_64.whl", hash = "sha256:0cf2d91ecc3fcc0625c2c530fe004f82c110405f101548512cce44322fa8ac44"},
- {file = "cffi-2.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f73b96c41e3b2adedc34a7356e64c8eb96e03a3782b535e043a986276ce12a49"},
- {file = "cffi-2.0.0-cp310-cp310-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:53f77cbe57044e88bbd5ed26ac1d0514d2acf0591dd6bb02a3ae37f76811b80c"},
- {file = "cffi-2.0.0-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:3e837e369566884707ddaf85fc1744b47575005c0a229de3327f8f9a20f4efeb"},
- {file = "cffi-2.0.0-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:5eda85d6d1879e692d546a078b44251cdd08dd1cfb98dfb77b670c97cee49ea0"},
- {file = "cffi-2.0.0-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:9332088d75dc3241c702d852d4671613136d90fa6881da7d770a483fd05248b4"},
- {file = "cffi-2.0.0-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:fc7de24befaeae77ba923797c7c87834c73648a05a4bde34b3b7e5588973a453"},
- {file = "cffi-2.0.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:cf364028c016c03078a23b503f02058f1814320a56ad535686f90565636a9495"},
- {file = "cffi-2.0.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:e11e82b744887154b182fd3e7e8512418446501191994dbf9c9fc1f32cc8efd5"},
- {file = "cffi-2.0.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:8ea985900c5c95ce9db1745f7933eeef5d314f0565b27625d9a10ec9881e1bfb"},
- {file = "cffi-2.0.0-cp310-cp310-win32.whl", hash = "sha256:1f72fb8906754ac8a2cc3f9f5aaa298070652a0ffae577e0ea9bd480dc3c931a"},
- {file = "cffi-2.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:b18a3ed7d5b3bd8d9ef7a8cb226502c6bf8308df1525e1cc676c3680e7176739"},
- {file = "cffi-2.0.0-cp311-cp311-macosx_10_13_x86_64.whl", hash = "sha256:b4c854ef3adc177950a8dfc81a86f5115d2abd545751a304c5bcf2c2c7283cfe"},
- {file = "cffi-2.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2de9a304e27f7596cd03d16f1b7c72219bd944e99cc52b84d0145aefb07cbd3c"},
- {file = "cffi-2.0.0-cp311-cp311-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:baf5215e0ab74c16e2dd324e8ec067ef59e41125d3eade2b863d294fd5035c92"},
- {file = "cffi-2.0.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:730cacb21e1bdff3ce90babf007d0a0917cc3e6492f336c2f0134101e0944f93"},
- {file = "cffi-2.0.0-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:6824f87845e3396029f3820c206e459ccc91760e8fa24422f8b0c3d1731cbec5"},
- {file = "cffi-2.0.0-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:9de40a7b0323d889cf8d23d1ef214f565ab154443c42737dfe52ff82cf857664"},
- {file = "cffi-2.0.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:8941aaadaf67246224cee8c3803777eed332a19d909b47e29c9842ef1e79ac26"},
- {file = "cffi-2.0.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:a05d0c237b3349096d3981b727493e22147f934b20f6f125a3eba8f994bec4a9"},
- {file = "cffi-2.0.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:94698a9c5f91f9d138526b48fe26a199609544591f859c870d477351dc7b2414"},
- {file = "cffi-2.0.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:5fed36fccc0612a53f1d4d9a816b50a36702c28a2aa880cb8a122b3466638743"},
- {file = "cffi-2.0.0-cp311-cp311-win32.whl", hash = "sha256:c649e3a33450ec82378822b3dad03cc228b8f5963c0c12fc3b1e0ab940f768a5"},
- {file = "cffi-2.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:66f011380d0e49ed280c789fbd08ff0d40968ee7b665575489afa95c98196ab5"},
- {file = "cffi-2.0.0-cp311-cp311-win_arm64.whl", hash = "sha256:c6638687455baf640e37344fe26d37c404db8b80d037c3d29f58fe8d1c3b194d"},
- {file = "cffi-2.0.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:6d02d6655b0e54f54c4ef0b94eb6be0607b70853c45ce98bd278dc7de718be5d"},
- {file = "cffi-2.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8eca2a813c1cb7ad4fb74d368c2ffbbb4789d377ee5bb8df98373c2cc0dee76c"},
- {file = "cffi-2.0.0-cp312-cp312-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:21d1152871b019407d8ac3985f6775c079416c282e431a4da6afe7aefd2bccbe"},
- {file = "cffi-2.0.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:b21e08af67b8a103c71a250401c78d5e0893beff75e28c53c98f4de42f774062"},
- {file = "cffi-2.0.0-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:1e3a615586f05fc4065a8b22b8152f0c1b00cdbc60596d187c2a74f9e3036e4e"},
- {file = "cffi-2.0.0-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:81afed14892743bbe14dacb9e36d9e0e504cd204e0b165062c488942b9718037"},
- {file = "cffi-2.0.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:3e17ed538242334bf70832644a32a7aae3d83b57567f9fd60a26257e992b79ba"},
- {file = "cffi-2.0.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:3925dd22fa2b7699ed2617149842d2e6adde22b262fcbfada50e3d195e4b3a94"},
- {file = "cffi-2.0.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:2c8f814d84194c9ea681642fd164267891702542f028a15fc97d4674b6206187"},
- {file = "cffi-2.0.0-cp312-cp312-win32.whl", hash = "sha256:da902562c3e9c550df360bfa53c035b2f241fed6d9aef119048073680ace4a18"},
- {file = "cffi-2.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:da68248800ad6320861f129cd9c1bf96ca849a2771a59e0344e88681905916f5"},
- {file = "cffi-2.0.0-cp312-cp312-win_arm64.whl", hash = "sha256:4671d9dd5ec934cb9a73e7ee9676f9362aba54f7f34910956b84d727b0d73fb6"},
- {file = "cffi-2.0.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:00bdf7acc5f795150faa6957054fbbca2439db2f775ce831222b66f192f03beb"},
- {file = "cffi-2.0.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:45d5e886156860dc35862657e1494b9bae8dfa63bf56796f2fb56e1679fc0bca"},
- {file = "cffi-2.0.0-cp313-cp313-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:07b271772c100085dd28b74fa0cd81c8fb1a3ba18b21e03d7c27f3436a10606b"},
- {file = "cffi-2.0.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:d48a880098c96020b02d5a1f7d9251308510ce8858940e6fa99ece33f610838b"},
- {file = "cffi-2.0.0-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:f93fd8e5c8c0a4aa1f424d6173f14a892044054871c771f8566e4008eaa359d2"},
- {file = "cffi-2.0.0-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:dd4f05f54a52fb558f1ba9f528228066954fee3ebe629fc1660d874d040ae5a3"},
- {file = "cffi-2.0.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:c8d3b5532fc71b7a77c09192b4a5a200ea992702734a2e9279a37f2478236f26"},
- {file = "cffi-2.0.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:d9b29c1f0ae438d5ee9acb31cadee00a58c46cc9c0b2f9038c6b0b3470877a8c"},
- {file = "cffi-2.0.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:6d50360be4546678fc1b79ffe7a66265e28667840010348dd69a314145807a1b"},
- {file = "cffi-2.0.0-cp313-cp313-win32.whl", hash = "sha256:74a03b9698e198d47562765773b4a8309919089150a0bb17d829ad7b44b60d27"},
- {file = "cffi-2.0.0-cp313-cp313-win_amd64.whl", hash = "sha256:19f705ada2530c1167abacb171925dd886168931e0a7b78f5bffcae5c6b5be75"},
- {file = "cffi-2.0.0-cp313-cp313-win_arm64.whl", hash = "sha256:256f80b80ca3853f90c21b23ee78cd008713787b1b1e93eae9f3d6a7134abd91"},
- {file = "cffi-2.0.0-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:fc33c5141b55ed366cfaad382df24fe7dcbc686de5be719b207bb248e3053dc5"},
- {file = "cffi-2.0.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:c654de545946e0db659b3400168c9ad31b5d29593291482c43e3564effbcee13"},
- {file = "cffi-2.0.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:24b6f81f1983e6df8db3adc38562c83f7d4a0c36162885ec7f7b77c7dcbec97b"},
- {file = "cffi-2.0.0-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:12873ca6cb9b0f0d3a0da705d6086fe911591737a59f28b7936bdfed27c0d47c"},
- {file = "cffi-2.0.0-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:d9b97165e8aed9272a6bb17c01e3cc5871a594a446ebedc996e2397a1c1ea8ef"},
- {file = "cffi-2.0.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:afb8db5439b81cf9c9d0c80404b60c3cc9c3add93e114dcae767f1477cb53775"},
- {file = "cffi-2.0.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:737fe7d37e1a1bffe70bd5754ea763a62a066dc5913ca57e957824b72a85e205"},
- {file = "cffi-2.0.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:38100abb9d1b1435bc4cc340bb4489635dc2f0da7456590877030c9b3d40b0c1"},
- {file = "cffi-2.0.0-cp314-cp314-win32.whl", hash = "sha256:087067fa8953339c723661eda6b54bc98c5625757ea62e95eb4898ad5e776e9f"},
- {file = "cffi-2.0.0-cp314-cp314-win_amd64.whl", hash = "sha256:203a48d1fb583fc7d78a4c6655692963b860a417c0528492a6bc21f1aaefab25"},
- {file = "cffi-2.0.0-cp314-cp314-win_arm64.whl", hash = "sha256:dbd5c7a25a7cb98f5ca55d258b103a2054f859a46ae11aaf23134f9cc0d356ad"},
- {file = "cffi-2.0.0-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:9a67fc9e8eb39039280526379fb3a70023d77caec1852002b4da7e8b270c4dd9"},
- {file = "cffi-2.0.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:7a66c7204d8869299919db4d5069a82f1561581af12b11b3c9f48c584eb8743d"},
- {file = "cffi-2.0.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:7cc09976e8b56f8cebd752f7113ad07752461f48a58cbba644139015ac24954c"},
- {file = "cffi-2.0.0-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:92b68146a71df78564e4ef48af17551a5ddd142e5190cdf2c5624d0c3ff5b2e8"},
- {file = "cffi-2.0.0-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:b1e74d11748e7e98e2f426ab176d4ed720a64412b6a15054378afdb71e0f37dc"},
- {file = "cffi-2.0.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:28a3a209b96630bca57cce802da70c266eb08c6e97e5afd61a75611ee6c64592"},
- {file = "cffi-2.0.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:7553fb2090d71822f02c629afe6042c299edf91ba1bf94951165613553984512"},
- {file = "cffi-2.0.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:6c6c373cfc5c83a975506110d17457138c8c63016b563cc9ed6e056a82f13ce4"},
- {file = "cffi-2.0.0-cp314-cp314t-win32.whl", hash = "sha256:1fc9ea04857caf665289b7a75923f2c6ed559b8298a1b8c49e59f7dd95c8481e"},
- {file = "cffi-2.0.0-cp314-cp314t-win_amd64.whl", hash = "sha256:d68b6cef7827e8641e8ef16f4494edda8b36104d79773a334beaa1e3521430f6"},
- {file = "cffi-2.0.0-cp314-cp314t-win_arm64.whl", hash = "sha256:0a1527a803f0a659de1af2e1fd700213caba79377e27e4693648c2923da066f9"},
- {file = "cffi-2.0.0-cp39-cp39-macosx_10_13_x86_64.whl", hash = "sha256:fe562eb1a64e67dd297ccc4f5addea2501664954f2692b69a76449ec7913ecbf"},
- {file = "cffi-2.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:de8dad4425a6ca6e4e5e297b27b5c824ecc7581910bf9aee86cb6835e6812aa7"},
- {file = "cffi-2.0.0-cp39-cp39-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:4647afc2f90d1ddd33441e5b0e85b16b12ddec4fca55f0d9671fef036ecca27c"},
- {file = "cffi-2.0.0-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:3f4d46d8b35698056ec29bca21546e1551a205058ae1a181d871e278b0b28165"},
- {file = "cffi-2.0.0-cp39-cp39-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:e6e73b9e02893c764e7e8d5bb5ce277f1a009cd5243f8228f75f842bf937c534"},
- {file = "cffi-2.0.0-cp39-cp39-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:cb527a79772e5ef98fb1d700678fe031e353e765d1ca2d409c92263c6d43e09f"},
- {file = "cffi-2.0.0-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:61d028e90346df14fedc3d1e5441df818d095f3b87d286825dfcbd6459b7ef63"},
- {file = "cffi-2.0.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:0f6084a0ea23d05d20c3edcda20c3d006f9b6f3fefeac38f59262e10cef47ee2"},
- {file = "cffi-2.0.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:1cd13c99ce269b3ed80b417dcd591415d3372bcac067009b6e0f59c7d4015e65"},
- {file = "cffi-2.0.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:89472c9762729b5ae1ad974b777416bfda4ac5642423fa93bd57a09204712322"},
- {file = "cffi-2.0.0-cp39-cp39-win32.whl", hash = "sha256:2081580ebb843f759b9f617314a24ed5738c51d2aee65d31e02f6f7a2b97707a"},
- {file = "cffi-2.0.0-cp39-cp39-win_amd64.whl", hash = "sha256:b882b3df248017dba09d6b16defe9b5c407fe32fc7c65a9c69798e6175601be9"},
- {file = "cffi-2.0.0.tar.gz", hash = "sha256:44d1b5909021139fe36001ae048dbdde8214afa20200eda0f64c068cac5d5529"},
-]
-
-[package.dependencies]
-pycparser = {version = "*", markers = "implementation_name != \"PyPy\""}
-
[[package]]
name = "charset-normalizer"
version = "3.4.4"
@@ -279,103 +146,103 @@ files = [
[[package]]
name = "coverage"
-version = "7.12.0"
+version = "7.13.0"
description = "Code coverage measurement for Python"
optional = false
python-versions = ">=3.10"
files = [
- {file = "coverage-7.12.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:32b75c2ba3f324ee37af3ccee5b30458038c50b349ad9b88cee85096132a575b"},
- {file = "coverage-7.12.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:cb2a1b6ab9fe833714a483a915de350abc624a37149649297624c8d57add089c"},
- {file = "coverage-7.12.0-cp310-cp310-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:5734b5d913c3755e72f70bf6cc37a0518d4f4745cde760c5d8e12005e62f9832"},
- {file = "coverage-7.12.0-cp310-cp310-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:b527a08cdf15753279b7afb2339a12073620b761d79b81cbe2cdebdb43d90daa"},
- {file = "coverage-7.12.0-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:9bb44c889fb68004e94cab71f6a021ec83eac9aeabdbb5a5a88821ec46e1da73"},
- {file = "coverage-7.12.0-cp310-cp310-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:4b59b501455535e2e5dde5881739897967b272ba25988c89145c12d772810ccb"},
- {file = "coverage-7.12.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:d8842f17095b9868a05837b7b1b73495293091bed870e099521ada176aa3e00e"},
- {file = "coverage-7.12.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:c5a6f20bf48b8866095c6820641e7ffbe23f2ac84a2efc218d91235e404c7777"},
- {file = "coverage-7.12.0-cp310-cp310-musllinux_1_2_riscv64.whl", hash = "sha256:5f3738279524e988d9da2893f307c2093815c623f8d05a8f79e3eff3a7a9e553"},
- {file = "coverage-7.12.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:e0d68c1f7eabbc8abe582d11fa393ea483caf4f44b0af86881174769f185c94d"},
- {file = "coverage-7.12.0-cp310-cp310-win32.whl", hash = "sha256:7670d860e18b1e3ee5930b17a7d55ae6287ec6e55d9799982aa103a2cc1fa2ef"},
- {file = "coverage-7.12.0-cp310-cp310-win_amd64.whl", hash = "sha256:f999813dddeb2a56aab5841e687b68169da0d3f6fc78ccf50952fa2463746022"},
- {file = "coverage-7.12.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:aa124a3683d2af98bd9d9c2bfa7a5076ca7e5ab09fdb96b81fa7d89376ae928f"},
- {file = "coverage-7.12.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d93fbf446c31c0140208dcd07c5d882029832e8ed7891a39d6d44bd65f2316c3"},
- {file = "coverage-7.12.0-cp311-cp311-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:52ca620260bd8cd6027317bdd8b8ba929be1d741764ee765b42c4d79a408601e"},
- {file = "coverage-7.12.0-cp311-cp311-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:f3433ffd541380f3a0e423cff0f4926d55b0cc8c1d160fdc3be24a4c03aa65f7"},
- {file = "coverage-7.12.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f7bbb321d4adc9f65e402c677cd1c8e4c2d0105d3ce285b51b4d87f1d5db5245"},
- {file = "coverage-7.12.0-cp311-cp311-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:22a7aade354a72dff3b59c577bfd18d6945c61f97393bc5fb7bd293a4237024b"},
- {file = "coverage-7.12.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:3ff651dcd36d2fea66877cd4a82de478004c59b849945446acb5baf9379a1b64"},
- {file = "coverage-7.12.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:31b8b2e38391a56e3cea39d22a23faaa7c3fc911751756ef6d2621d2a9daf742"},
- {file = "coverage-7.12.0-cp311-cp311-musllinux_1_2_riscv64.whl", hash = "sha256:297bc2da28440f5ae51c845a47c8175a4db0553a53827886e4fb25c66633000c"},
- {file = "coverage-7.12.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:6ff7651cc01a246908eac162a6a86fc0dbab6de1ad165dfb9a1e2ec660b44984"},
- {file = "coverage-7.12.0-cp311-cp311-win32.whl", hash = "sha256:313672140638b6ddb2c6455ddeda41c6a0b208298034544cfca138978c6baed6"},
- {file = "coverage-7.12.0-cp311-cp311-win_amd64.whl", hash = "sha256:a1783ed5bd0d5938d4435014626568dc7f93e3cb99bc59188cc18857c47aa3c4"},
- {file = "coverage-7.12.0-cp311-cp311-win_arm64.whl", hash = "sha256:4648158fd8dd9381b5847622df1c90ff314efbfc1df4550092ab6013c238a5fc"},
- {file = "coverage-7.12.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:29644c928772c78512b48e14156b81255000dcfd4817574ff69def189bcb3647"},
- {file = "coverage-7.12.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8638cbb002eaa5d7c8d04da667813ce1067080b9a91099801a0053086e52b736"},
- {file = "coverage-7.12.0-cp312-cp312-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:083631eeff5eb9992c923e14b810a179798bb598e6a0dd60586819fc23be6e60"},
- {file = "coverage-7.12.0-cp312-cp312-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:99d5415c73ca12d558e07776bd957c4222c687b9f1d26fa0e1b57e3598bdcde8"},
- {file = "coverage-7.12.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e949ebf60c717c3df63adb4a1a366c096c8d7fd8472608cd09359e1bd48ef59f"},
- {file = "coverage-7.12.0-cp312-cp312-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:6d907ddccbca819afa2cd014bc69983b146cca2735a0b1e6259b2a6c10be1e70"},
- {file = "coverage-7.12.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:b1518ecbad4e6173f4c6e6c4a46e49555ea5679bf3feda5edb1b935c7c44e8a0"},
- {file = "coverage-7.12.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:51777647a749abdf6f6fd8c7cffab12de68ab93aab15efc72fbbb83036c2a068"},
- {file = "coverage-7.12.0-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:42435d46d6461a3b305cdfcad7cdd3248787771f53fe18305548cba474e6523b"},
- {file = "coverage-7.12.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:5bcead88c8423e1855e64b8057d0544e33e4080b95b240c2a355334bb7ced937"},
- {file = "coverage-7.12.0-cp312-cp312-win32.whl", hash = "sha256:dcbb630ab034e86d2a0f79aefd2be07e583202f41e037602d438c80044957baa"},
- {file = "coverage-7.12.0-cp312-cp312-win_amd64.whl", hash = "sha256:2fd8354ed5d69775ac42986a691fbf68b4084278710cee9d7c3eaa0c28fa982a"},
- {file = "coverage-7.12.0-cp312-cp312-win_arm64.whl", hash = "sha256:737c3814903be30695b2de20d22bcc5428fdae305c61ba44cdc8b3252984c49c"},
- {file = "coverage-7.12.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:47324fffca8d8eae7e185b5bb20c14645f23350f870c1649003618ea91a78941"},
- {file = "coverage-7.12.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:ccf3b2ede91decd2fb53ec73c1f949c3e034129d1e0b07798ff1d02ea0c8fa4a"},
- {file = "coverage-7.12.0-cp313-cp313-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:b365adc70a6936c6b0582dc38746b33b2454148c02349345412c6e743efb646d"},
- {file = "coverage-7.12.0-cp313-cp313-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:bc13baf85cd8a4cfcf4a35c7bc9d795837ad809775f782f697bf630b7e200211"},
- {file = "coverage-7.12.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:099d11698385d572ceafb3288a5b80fe1fc58bf665b3f9d362389de488361d3d"},
- {file = "coverage-7.12.0-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:473dc45d69694069adb7680c405fb1e81f60b2aff42c81e2f2c3feaf544d878c"},
- {file = "coverage-7.12.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:583f9adbefd278e9de33c33d6846aa8f5d164fa49b47144180a0e037f0688bb9"},
- {file = "coverage-7.12.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:b2089cc445f2dc0af6f801f0d1355c025b76c24481935303cf1af28f636688f0"},
- {file = "coverage-7.12.0-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:950411f1eb5d579999c5f66c62a40961f126fc71e5e14419f004471957b51508"},
- {file = "coverage-7.12.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:b1aab7302a87bafebfe76b12af681b56ff446dc6f32ed178ff9c092ca776e6bc"},
- {file = "coverage-7.12.0-cp313-cp313-win32.whl", hash = "sha256:d7e0d0303c13b54db495eb636bc2465b2fb8475d4c8bcec8fe4b5ca454dfbae8"},
- {file = "coverage-7.12.0-cp313-cp313-win_amd64.whl", hash = "sha256:ce61969812d6a98a981d147d9ac583a36ac7db7766f2e64a9d4d059c2fe29d07"},
- {file = "coverage-7.12.0-cp313-cp313-win_arm64.whl", hash = "sha256:bcec6f47e4cb8a4c2dc91ce507f6eefc6a1b10f58df32cdc61dff65455031dfc"},
- {file = "coverage-7.12.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:459443346509476170d553035e4a3eed7b860f4fe5242f02de1010501956ce87"},
- {file = "coverage-7.12.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:04a79245ab2b7a61688958f7a855275997134bc84f4a03bc240cf64ff132abf6"},
- {file = "coverage-7.12.0-cp313-cp313t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:09a86acaaa8455f13d6a99221d9654df249b33937b4e212b4e5a822065f12aa7"},
- {file = "coverage-7.12.0-cp313-cp313t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:907e0df1b71ba77463687a74149c6122c3f6aac56c2510a5d906b2f368208560"},
- {file = "coverage-7.12.0-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:9b57e2d0ddd5f0582bae5437c04ee71c46cd908e7bc5d4d0391f9a41e812dd12"},
- {file = "coverage-7.12.0-cp313-cp313t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:58c1c6aa677f3a1411fe6fb28ec3a942e4f665df036a3608816e0847fad23296"},
- {file = "coverage-7.12.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:4c589361263ab2953e3c4cd2a94db94c4ad4a8e572776ecfbad2389c626e4507"},
- {file = "coverage-7.12.0-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:91b810a163ccad2e43b1faa11d70d3cf4b6f3d83f9fd5f2df82a32d47b648e0d"},
- {file = "coverage-7.12.0-cp313-cp313t-musllinux_1_2_riscv64.whl", hash = "sha256:40c867af715f22592e0d0fb533a33a71ec9e0f73a6945f722a0c85c8c1cbe3a2"},
- {file = "coverage-7.12.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:68b0d0a2d84f333de875666259dadf28cc67858bc8fd8b3f1eae84d3c2bec455"},
- {file = "coverage-7.12.0-cp313-cp313t-win32.whl", hash = "sha256:73f9e7fbd51a221818fd11b7090eaa835a353ddd59c236c57b2199486b116c6d"},
- {file = "coverage-7.12.0-cp313-cp313t-win_amd64.whl", hash = "sha256:24cff9d1f5743f67db7ba46ff284018a6e9aeb649b67aa1e70c396aa1b7cb23c"},
- {file = "coverage-7.12.0-cp313-cp313t-win_arm64.whl", hash = "sha256:c87395744f5c77c866d0f5a43d97cc39e17c7f1cb0115e54a2fe67ca75c5d14d"},
- {file = "coverage-7.12.0-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:a1c59b7dc169809a88b21a936eccf71c3895a78f5592051b1af8f4d59c2b4f92"},
- {file = "coverage-7.12.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:8787b0f982e020adb732b9f051f3e49dd5054cebbc3f3432061278512a2b1360"},
- {file = "coverage-7.12.0-cp314-cp314-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:5ea5a9f7dc8877455b13dd1effd3202e0bca72f6f3ab09f9036b1bcf728f69ac"},
- {file = "coverage-7.12.0-cp314-cp314-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:fdba9f15849534594f60b47c9a30bc70409b54947319a7c4fd0e8e3d8d2f355d"},
- {file = "coverage-7.12.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a00594770eb715854fb1c57e0dea08cce6720cfbc531accdb9850d7c7770396c"},
- {file = "coverage-7.12.0-cp314-cp314-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:5560c7e0d82b42eb1951e4f68f071f8017c824ebfd5a6ebe42c60ac16c6c2434"},
- {file = "coverage-7.12.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:d6c2e26b481c9159c2773a37947a9718cfdc58893029cdfb177531793e375cfc"},
- {file = "coverage-7.12.0-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:6e1a8c066dabcde56d5d9fed6a66bc19a2883a3fe051f0c397a41fc42aedd4cc"},
- {file = "coverage-7.12.0-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:f7ba9da4726e446d8dd8aae5a6cd872511184a5d861de80a86ef970b5dacce3e"},
- {file = "coverage-7.12.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:e0f483ab4f749039894abaf80c2f9e7ed77bbf3c737517fb88c8e8e305896a17"},
- {file = "coverage-7.12.0-cp314-cp314-win32.whl", hash = "sha256:76336c19a9ef4a94b2f8dc79f8ac2da3f193f625bb5d6f51a328cd19bfc19933"},
- {file = "coverage-7.12.0-cp314-cp314-win_amd64.whl", hash = "sha256:7c1059b600aec6ef090721f8f633f60ed70afaffe8ecab85b59df748f24b31fe"},
- {file = "coverage-7.12.0-cp314-cp314-win_arm64.whl", hash = "sha256:172cf3a34bfef42611963e2b661302a8931f44df31629e5b1050567d6b90287d"},
- {file = "coverage-7.12.0-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:aa7d48520a32cb21c7a9b31f81799e8eaec7239db36c3b670be0fa2403828d1d"},
- {file = "coverage-7.12.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:90d58ac63bc85e0fb919f14d09d6caa63f35a5512a2205284b7816cafd21bb03"},
- {file = "coverage-7.12.0-cp314-cp314t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:ca8ecfa283764fdda3eae1bdb6afe58bf78c2c3ec2b2edcb05a671f0bba7b3f9"},
- {file = "coverage-7.12.0-cp314-cp314t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:874fe69a0785d96bd066059cd4368022cebbec1a8958f224f0016979183916e6"},
- {file = "coverage-7.12.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5b3c889c0b8b283a24d721a9eabc8ccafcfc3aebf167e4cd0d0e23bf8ec4e339"},
- {file = "coverage-7.12.0-cp314-cp314t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:8bb5b894b3ec09dcd6d3743229dc7f2c42ef7787dc40596ae04c0edda487371e"},
- {file = "coverage-7.12.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:79a44421cd5fba96aa57b5e3b5a4d3274c449d4c622e8f76882d76635501fd13"},
- {file = "coverage-7.12.0-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:33baadc0efd5c7294f436a632566ccc1f72c867f82833eb59820ee37dc811c6f"},
- {file = "coverage-7.12.0-cp314-cp314t-musllinux_1_2_riscv64.whl", hash = "sha256:c406a71f544800ef7e9e0000af706b88465f3573ae8b8de37e5f96c59f689ad1"},
- {file = "coverage-7.12.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:e71bba6a40883b00c6d571599b4627f50c360b3d0d02bfc658168936be74027b"},
- {file = "coverage-7.12.0-cp314-cp314t-win32.whl", hash = "sha256:9157a5e233c40ce6613dead4c131a006adfda70e557b6856b97aceed01b0e27a"},
- {file = "coverage-7.12.0-cp314-cp314t-win_amd64.whl", hash = "sha256:e84da3a0fd233aeec797b981c51af1cabac74f9bd67be42458365b30d11b5291"},
- {file = "coverage-7.12.0-cp314-cp314t-win_arm64.whl", hash = "sha256:01d24af36fedda51c2b1aca56e4330a3710f83b02a5ff3743a6b015ffa7c9384"},
- {file = "coverage-7.12.0-py3-none-any.whl", hash = "sha256:159d50c0b12e060b15ed3d39f87ed43d4f7f7ad40b8a534f4dd331adbb51104a"},
- {file = "coverage-7.12.0.tar.gz", hash = "sha256:fc11e0a4e372cb5f282f16ef90d4a585034050ccda536451901abfb19a57f40c"},
+ {file = "coverage-7.13.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:02d9fb9eccd48f6843c98a37bd6817462f130b86da8660461e8f5e54d4c06070"},
+ {file = "coverage-7.13.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:367449cf07d33dc216c083f2036bb7d976c6e4903ab31be400ad74ad9f85ce98"},
+ {file = "coverage-7.13.0-cp310-cp310-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:cdb3c9f8fef0a954c632f64328a3935988d33a6604ce4bf67ec3e39670f12ae5"},
+ {file = "coverage-7.13.0-cp310-cp310-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:d10fd186aac2316f9bbb46ef91977f9d394ded67050ad6d84d94ed6ea2e8e54e"},
+ {file = "coverage-7.13.0-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:7f88ae3e69df2ab62fb0bc5219a597cb890ba5c438190ffa87490b315190bb33"},
+ {file = "coverage-7.13.0-cp310-cp310-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:c4be718e51e86f553bcf515305a158a1cd180d23b72f07ae76d6017c3cc5d791"},
+ {file = "coverage-7.13.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:a00d3a393207ae12f7c49bb1c113190883b500f48979abb118d8b72b8c95c032"},
+ {file = "coverage-7.13.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:3a7b1cd820e1b6116f92c6128f1188e7afe421c7e1b35fa9836b11444e53ebd9"},
+ {file = "coverage-7.13.0-cp310-cp310-musllinux_1_2_riscv64.whl", hash = "sha256:37eee4e552a65866f15dedd917d5e5f3d59805994260720821e2c1b51ac3248f"},
+ {file = "coverage-7.13.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:62d7c4f13102148c78d7353c6052af6d899a7f6df66a32bddcc0c0eb7c5326f8"},
+ {file = "coverage-7.13.0-cp310-cp310-win32.whl", hash = "sha256:24e4e56304fdb56f96f80eabf840eab043b3afea9348b88be680ec5986780a0f"},
+ {file = "coverage-7.13.0-cp310-cp310-win_amd64.whl", hash = "sha256:74c136e4093627cf04b26a35dab8cbfc9b37c647f0502fc313376e11726ba303"},
+ {file = "coverage-7.13.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:0dfa3855031070058add1a59fdfda0192fd3e8f97e7c81de0596c145dea51820"},
+ {file = "coverage-7.13.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4fdb6f54f38e334db97f72fa0c701e66d8479af0bc3f9bfb5b90f1c30f54500f"},
+ {file = "coverage-7.13.0-cp311-cp311-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:7e442c013447d1d8d195be62852270b78b6e255b79b8675bad8479641e21fd96"},
+ {file = "coverage-7.13.0-cp311-cp311-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:1ed5630d946859de835a85e9a43b721123a8a44ec26e2830b296d478c7fd4259"},
+ {file = "coverage-7.13.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:7f15a931a668e58087bc39d05d2b4bf4b14ff2875b49c994bbdb1c2217a8daeb"},
+ {file = "coverage-7.13.0-cp311-cp311-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:30a3a201a127ea57f7e14ba43c93c9c4be8b7d17a26e03bb49e6966d019eede9"},
+ {file = "coverage-7.13.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:7a485ff48fbd231efa32d58f479befce52dcb6bfb2a88bb7bf9a0b89b1bc8030"},
+ {file = "coverage-7.13.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:22486cdafba4f9e471c816a2a5745337742a617fef68e890d8baf9f3036d7833"},
+ {file = "coverage-7.13.0-cp311-cp311-musllinux_1_2_riscv64.whl", hash = "sha256:263c3dbccc78e2e331e59e90115941b5f53e85cfcc6b3b2fbff1fd4e3d2c6ea8"},
+ {file = "coverage-7.13.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:e5330fa0cc1f5c3c4c3bb8e101b742025933e7848989370a1d4c8c5e401ea753"},
+ {file = "coverage-7.13.0-cp311-cp311-win32.whl", hash = "sha256:0f4872f5d6c54419c94c25dd6ae1d015deeb337d06e448cd890a1e89a8ee7f3b"},
+ {file = "coverage-7.13.0-cp311-cp311-win_amd64.whl", hash = "sha256:51a202e0f80f241ccb68e3e26e19ab5b3bf0f813314f2c967642f13ebcf1ddfe"},
+ {file = "coverage-7.13.0-cp311-cp311-win_arm64.whl", hash = "sha256:d2a9d7f1c11487b1c69367ab3ac2d81b9b3721f097aa409a3191c3e90f8f3dd7"},
+ {file = "coverage-7.13.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:0b3d67d31383c4c68e19a88e28fc4c2e29517580f1b0ebec4a069d502ce1e0bf"},
+ {file = "coverage-7.13.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:581f086833d24a22c89ae0fe2142cfaa1c92c930adf637ddf122d55083fb5a0f"},
+ {file = "coverage-7.13.0-cp312-cp312-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:0a3a30f0e257df382f5f9534d4ce3d4cf06eafaf5192beb1a7bd066cb10e78fb"},
+ {file = "coverage-7.13.0-cp312-cp312-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:583221913fbc8f53b88c42e8dbb8fca1d0f2e597cb190ce45916662b8b9d9621"},
+ {file = "coverage-7.13.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5f5d9bd30756fff3e7216491a0d6d520c448d5124d3d8e8f56446d6412499e74"},
+ {file = "coverage-7.13.0-cp312-cp312-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:a23e5a1f8b982d56fa64f8e442e037f6ce29322f1f9e6c2344cd9e9f4407ee57"},
+ {file = "coverage-7.13.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:9b01c22bc74a7fb44066aaf765224c0d933ddf1f5047d6cdfe4795504a4493f8"},
+ {file = "coverage-7.13.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:898cce66d0836973f48dda4e3514d863d70142bdf6dfab932b9b6a90ea5b222d"},
+ {file = "coverage-7.13.0-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:3ab483ea0e251b5790c2aac03acde31bff0c736bf8a86829b89382b407cd1c3b"},
+ {file = "coverage-7.13.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:1d84e91521c5e4cb6602fe11ece3e1de03b2760e14ae4fcf1a4b56fa3c801fcd"},
+ {file = "coverage-7.13.0-cp312-cp312-win32.whl", hash = "sha256:193c3887285eec1dbdb3f2bd7fbc351d570ca9c02ca756c3afbc71b3c98af6ef"},
+ {file = "coverage-7.13.0-cp312-cp312-win_amd64.whl", hash = "sha256:4f3e223b2b2db5e0db0c2b97286aba0036ca000f06aca9b12112eaa9af3d92ae"},
+ {file = "coverage-7.13.0-cp312-cp312-win_arm64.whl", hash = "sha256:086cede306d96202e15a4b77ace8472e39d9f4e5f9fd92dd4fecdfb2313b2080"},
+ {file = "coverage-7.13.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:28ee1c96109974af104028a8ef57cec21447d42d0e937c0275329272e370ebcf"},
+ {file = "coverage-7.13.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:d1e97353dcc5587b85986cda4ff3ec98081d7e84dd95e8b2a6d59820f0545f8a"},
+ {file = "coverage-7.13.0-cp313-cp313-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:99acd4dfdfeb58e1937629eb1ab6ab0899b131f183ee5f23e0b5da5cba2fec74"},
+ {file = "coverage-7.13.0-cp313-cp313-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:ff45e0cd8451e293b63ced93161e189780baf444119391b3e7d25315060368a6"},
+ {file = "coverage-7.13.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f4f72a85316d8e13234cafe0a9f81b40418ad7a082792fa4165bd7d45d96066b"},
+ {file = "coverage-7.13.0-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:11c21557d0e0a5a38632cbbaca5f008723b26a89d70db6315523df6df77d6232"},
+ {file = "coverage-7.13.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:76541dc8d53715fb4f7a3a06b34b0dc6846e3c69bc6204c55653a85dd6220971"},
+ {file = "coverage-7.13.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:6e9e451dee940a86789134b6b0ffbe31c454ade3b849bb8a9d2cca2541a8e91d"},
+ {file = "coverage-7.13.0-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:5c67dace46f361125e6b9cace8fe0b729ed8479f47e70c89b838d319375c8137"},
+ {file = "coverage-7.13.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:f59883c643cb19630500f57016f76cfdcd6845ca8c5b5ea1f6e17f74c8e5f511"},
+ {file = "coverage-7.13.0-cp313-cp313-win32.whl", hash = "sha256:58632b187be6f0be500f553be41e277712baa278147ecb7559983c6d9faf7ae1"},
+ {file = "coverage-7.13.0-cp313-cp313-win_amd64.whl", hash = "sha256:73419b89f812f498aca53f757dd834919b48ce4799f9d5cad33ca0ae442bdb1a"},
+ {file = "coverage-7.13.0-cp313-cp313-win_arm64.whl", hash = "sha256:eb76670874fdd6091eedcc856128ee48c41a9bbbb9c3f1c7c3cf169290e3ffd6"},
+ {file = "coverage-7.13.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:6e63ccc6e0ad8986386461c3c4b737540f20426e7ec932f42e030320896c311a"},
+ {file = "coverage-7.13.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:494f5459ffa1bd45e18558cd98710c36c0b8fbfa82a5eabcbe671d80ecffbfe8"},
+ {file = "coverage-7.13.0-cp313-cp313t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:06cac81bf10f74034e055e903f5f946e3e26fc51c09fc9f584e4a1605d977053"},
+ {file = "coverage-7.13.0-cp313-cp313t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:f2ffc92b46ed6e6760f1d47a71e56b5664781bc68986dbd1836b2b70c0ce2071"},
+ {file = "coverage-7.13.0-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0602f701057c6823e5db1b74530ce85f17c3c5be5c85fc042ac939cbd909426e"},
+ {file = "coverage-7.13.0-cp313-cp313t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:25dc33618d45456ccb1d37bce44bc78cf269909aa14c4db2e03d63146a8a1493"},
+ {file = "coverage-7.13.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:71936a8b3b977ddd0b694c28c6a34f4fff2e9dd201969a4ff5d5fc7742d614b0"},
+ {file = "coverage-7.13.0-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:936bc20503ce24770c71938d1369461f0c5320830800933bc3956e2a4ded930e"},
+ {file = "coverage-7.13.0-cp313-cp313t-musllinux_1_2_riscv64.whl", hash = "sha256:af0a583efaacc52ae2521f8d7910aff65cdb093091d76291ac5820d5e947fc1c"},
+ {file = "coverage-7.13.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:f1c23e24a7000da892a312fb17e33c5f94f8b001de44b7cf8ba2e36fbd15859e"},
+ {file = "coverage-7.13.0-cp313-cp313t-win32.whl", hash = "sha256:5f8a0297355e652001015e93be345ee54393e45dc3050af4a0475c5a2b767d46"},
+ {file = "coverage-7.13.0-cp313-cp313t-win_amd64.whl", hash = "sha256:6abb3a4c52f05e08460bd9acf04fec027f8718ecaa0d09c40ffbc3fbd70ecc39"},
+ {file = "coverage-7.13.0-cp313-cp313t-win_arm64.whl", hash = "sha256:3ad968d1e3aa6ce5be295ab5fe3ae1bf5bb4769d0f98a80a0252d543a2ef2e9e"},
+ {file = "coverage-7.13.0-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:453b7ec753cf5e4356e14fe858064e5520c460d3bbbcb9c35e55c0d21155c256"},
+ {file = "coverage-7.13.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:af827b7cbb303e1befa6c4f94fd2bf72f108089cfa0f8abab8f4ca553cf5ca5a"},
+ {file = "coverage-7.13.0-cp314-cp314-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:9987a9e4f8197a1000280f7cc089e3ea2c8b3c0a64d750537809879a7b4ceaf9"},
+ {file = "coverage-7.13.0-cp314-cp314-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:3188936845cd0cb114fa6a51842a304cdbac2958145d03be2377ec41eb285d19"},
+ {file = "coverage-7.13.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a2bdb3babb74079f021696cb46b8bb5f5661165c385d3a238712b031a12355be"},
+ {file = "coverage-7.13.0-cp314-cp314-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:7464663eaca6adba4175f6c19354feea61ebbdd735563a03d1e472c7072d27bb"},
+ {file = "coverage-7.13.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:8069e831f205d2ff1f3d355e82f511eb7c5522d7d413f5db5756b772ec8697f8"},
+ {file = "coverage-7.13.0-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:6fb2d5d272341565f08e962cce14cdf843a08ac43bd621783527adb06b089c4b"},
+ {file = "coverage-7.13.0-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:5e70f92ef89bac1ac8a99b3324923b4749f008fdbd7aa9cb35e01d7a284a04f9"},
+ {file = "coverage-7.13.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:4b5de7d4583e60d5fd246dd57fcd3a8aa23c6e118a8c72b38adf666ba8e7e927"},
+ {file = "coverage-7.13.0-cp314-cp314-win32.whl", hash = "sha256:a6c6e16b663be828a8f0b6c5027d36471d4a9f90d28444aa4ced4d48d7d6ae8f"},
+ {file = "coverage-7.13.0-cp314-cp314-win_amd64.whl", hash = "sha256:0900872f2fdb3ee5646b557918d02279dc3af3dfb39029ac4e945458b13f73bc"},
+ {file = "coverage-7.13.0-cp314-cp314-win_arm64.whl", hash = "sha256:3a10260e6a152e5f03f26db4a407c4c62d3830b9af9b7c0450b183615f05d43b"},
+ {file = "coverage-7.13.0-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:9097818b6cc1cfb5f174e3263eba4a62a17683bcfe5c4b5d07f4c97fa51fbf28"},
+ {file = "coverage-7.13.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:0018f73dfb4301a89292c73be6ba5f58722ff79f51593352759c1790ded1cabe"},
+ {file = "coverage-7.13.0-cp314-cp314t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:166ad2a22ee770f5656e1257703139d3533b4a0b6909af67c6b4a3adc1c98657"},
+ {file = "coverage-7.13.0-cp314-cp314t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:f6aaef16d65d1787280943f1c8718dc32e9cf141014e4634d64446702d26e0ff"},
+ {file = "coverage-7.13.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e999e2dcc094002d6e2c7bbc1fb85b58ba4f465a760a8014d97619330cdbbbf3"},
+ {file = "coverage-7.13.0-cp314-cp314t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:00c3d22cf6fb1cf3bf662aaaa4e563be8243a5ed2630339069799835a9cc7f9b"},
+ {file = "coverage-7.13.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:22ccfe8d9bb0d6134892cbe1262493a8c70d736b9df930f3f3afae0fe3ac924d"},
+ {file = "coverage-7.13.0-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:9372dff5ea15930fea0445eaf37bbbafbc771a49e70c0aeed8b4e2c2614cc00e"},
+ {file = "coverage-7.13.0-cp314-cp314t-musllinux_1_2_riscv64.whl", hash = "sha256:69ac2c492918c2461bc6ace42d0479638e60719f2a4ef3f0815fa2df88e9f940"},
+ {file = "coverage-7.13.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:739c6c051a7540608d097b8e13c76cfa85263ced467168dc6b477bae3df7d0e2"},
+ {file = "coverage-7.13.0-cp314-cp314t-win32.whl", hash = "sha256:fe81055d8c6c9de76d60c94ddea73c290b416e061d40d542b24a5871bad498b7"},
+ {file = "coverage-7.13.0-cp314-cp314t-win_amd64.whl", hash = "sha256:445badb539005283825959ac9fa4a28f712c214b65af3a2c464f1adc90f5fcbc"},
+ {file = "coverage-7.13.0-cp314-cp314t-win_arm64.whl", hash = "sha256:de7f6748b890708578fc4b7bb967d810aeb6fcc9bff4bb77dbca77dab2f9df6a"},
+ {file = "coverage-7.13.0-py3-none-any.whl", hash = "sha256:850d2998f380b1e266459ca5b47bc9e7daf9af1d070f66317972f382d46f1904"},
+ {file = "coverage-7.13.0.tar.gz", hash = "sha256:a394aa27f2d7ff9bc04cf703817773a59ad6dfbd577032e690f961d2460ee936"},
]
[package.dependencies]
@@ -385,82 +252,16 @@ tomli = {version = "*", optional = true, markers = "python_full_version <= \"3.1
toml = ["tomli"]
[[package]]
-name = "cryptography"
-version = "46.0.3"
-description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers."
+name = "defusedxml"
+version = "0.7.1"
+description = "XML bomb protection for Python stdlib modules"
optional = false
-python-versions = "!=3.9.0,!=3.9.1,>=3.8"
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"
files = [
- {file = "cryptography-46.0.3-cp311-abi3-macosx_10_9_universal2.whl", hash = "sha256:109d4ddfadf17e8e7779c39f9b18111a09efb969a301a31e987416a0191ed93a"},
- {file = "cryptography-46.0.3-cp311-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:09859af8466b69bc3c27bdf4f5d84a665e0f7ab5088412e9e2ec49758eca5cbc"},
- {file = "cryptography-46.0.3-cp311-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:01ca9ff2885f3acc98c29f1860552e37f6d7c7d013d7334ff2a9de43a449315d"},
- {file = "cryptography-46.0.3-cp311-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:6eae65d4c3d33da080cff9c4ab1f711b15c1d9760809dad6ea763f3812d254cb"},
- {file = "cryptography-46.0.3-cp311-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:e5bf0ed4490068a2e72ac03d786693adeb909981cc596425d09032d372bcc849"},
- {file = "cryptography-46.0.3-cp311-abi3-manylinux_2_28_ppc64le.whl", hash = "sha256:5ecfccd2329e37e9b7112a888e76d9feca2347f12f37918facbb893d7bb88ee8"},
- {file = "cryptography-46.0.3-cp311-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:a2c0cd47381a3229c403062f764160d57d4d175e022c1df84e168c6251a22eec"},
- {file = "cryptography-46.0.3-cp311-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:549e234ff32571b1f4076ac269fcce7a808d3bf98b76c8dd560e42dbc66d7d91"},
- {file = "cryptography-46.0.3-cp311-abi3-manylinux_2_34_ppc64le.whl", hash = "sha256:c0a7bb1a68a5d3471880e264621346c48665b3bf1c3759d682fc0864c540bd9e"},
- {file = "cryptography-46.0.3-cp311-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:10b01676fc208c3e6feeb25a8b83d81767e8059e1fe86e1dc62d10a3018fa926"},
- {file = "cryptography-46.0.3-cp311-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:0abf1ffd6e57c67e92af68330d05760b7b7efb243aab8377e583284dbab72c71"},
- {file = "cryptography-46.0.3-cp311-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:a04bee9ab6a4da801eb9b51f1b708a1b5b5c9eb48c03f74198464c66f0d344ac"},
- {file = "cryptography-46.0.3-cp311-abi3-win32.whl", hash = "sha256:f260d0d41e9b4da1ed1e0f1ce571f97fe370b152ab18778e9e8f67d6af432018"},
- {file = "cryptography-46.0.3-cp311-abi3-win_amd64.whl", hash = "sha256:a9a3008438615669153eb86b26b61e09993921ebdd75385ddd748702c5adfddb"},
- {file = "cryptography-46.0.3-cp311-abi3-win_arm64.whl", hash = "sha256:5d7f93296ee28f68447397bf5198428c9aeeab45705a55d53a6343455dcb2c3c"},
- {file = "cryptography-46.0.3-cp314-cp314t-macosx_10_9_universal2.whl", hash = "sha256:00a5e7e87938e5ff9ff5447ab086a5706a957137e6e433841e9d24f38a065217"},
- {file = "cryptography-46.0.3-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:c8daeb2d2174beb4575b77482320303f3d39b8e81153da4f0fb08eb5fe86a6c5"},
- {file = "cryptography-46.0.3-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:39b6755623145ad5eff1dab323f4eae2a32a77a7abef2c5089a04a3d04366715"},
- {file = "cryptography-46.0.3-cp314-cp314t-manylinux_2_28_aarch64.whl", hash = "sha256:db391fa7c66df6762ee3f00c95a89e6d428f4d60e7abc8328f4fe155b5ac6e54"},
- {file = "cryptography-46.0.3-cp314-cp314t-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:78a97cf6a8839a48c49271cdcbd5cf37ca2c1d6b7fdd86cc864f302b5e9bf459"},
- {file = "cryptography-46.0.3-cp314-cp314t-manylinux_2_28_ppc64le.whl", hash = "sha256:dfb781ff7eaa91a6f7fd41776ec37c5853c795d3b358d4896fdbb5df168af422"},
- {file = "cryptography-46.0.3-cp314-cp314t-manylinux_2_28_x86_64.whl", hash = "sha256:6f61efb26e76c45c4a227835ddeae96d83624fb0d29eb5df5b96e14ed1a0afb7"},
- {file = "cryptography-46.0.3-cp314-cp314t-manylinux_2_34_aarch64.whl", hash = "sha256:23b1a8f26e43f47ceb6d6a43115f33a5a37d57df4ea0ca295b780ae8546e8044"},
- {file = "cryptography-46.0.3-cp314-cp314t-manylinux_2_34_ppc64le.whl", hash = "sha256:b419ae593c86b87014b9be7396b385491ad7f320bde96826d0dd174459e54665"},
- {file = "cryptography-46.0.3-cp314-cp314t-manylinux_2_34_x86_64.whl", hash = "sha256:50fc3343ac490c6b08c0cf0d704e881d0d660be923fd3076db3e932007e726e3"},
- {file = "cryptography-46.0.3-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:22d7e97932f511d6b0b04f2bfd818d73dcd5928db509460aaf48384778eb6d20"},
- {file = "cryptography-46.0.3-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:d55f3dffadd674514ad19451161118fd010988540cee43d8bc20675e775925de"},
- {file = "cryptography-46.0.3-cp314-cp314t-win32.whl", hash = "sha256:8a6e050cb6164d3f830453754094c086ff2d0b2f3a897a1d9820f6139a1f0914"},
- {file = "cryptography-46.0.3-cp314-cp314t-win_amd64.whl", hash = "sha256:760f83faa07f8b64e9c33fc963d790a2edb24efb479e3520c14a45741cd9b2db"},
- {file = "cryptography-46.0.3-cp314-cp314t-win_arm64.whl", hash = "sha256:516ea134e703e9fe26bcd1277a4b59ad30586ea90c365a87781d7887a646fe21"},
- {file = "cryptography-46.0.3-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:cb3d760a6117f621261d662bccc8ef5bc32ca673e037c83fbe565324f5c46936"},
- {file = "cryptography-46.0.3-cp38-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:4b7387121ac7d15e550f5cb4a43aef2559ed759c35df7336c402bb8275ac9683"},
- {file = "cryptography-46.0.3-cp38-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:15ab9b093e8f09daab0f2159bb7e47532596075139dd74365da52ecc9cb46c5d"},
- {file = "cryptography-46.0.3-cp38-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:46acf53b40ea38f9c6c229599a4a13f0d46a6c3fa9ef19fc1a124d62e338dfa0"},
- {file = "cryptography-46.0.3-cp38-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:10ca84c4668d066a9878890047f03546f3ae0a6b8b39b697457b7757aaf18dbc"},
- {file = "cryptography-46.0.3-cp38-abi3-manylinux_2_28_ppc64le.whl", hash = "sha256:36e627112085bb3b81b19fed209c05ce2a52ee8b15d161b7c643a7d5a88491f3"},
- {file = "cryptography-46.0.3-cp38-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:1000713389b75c449a6e979ffc7dcc8ac90b437048766cef052d4d30b8220971"},
- {file = "cryptography-46.0.3-cp38-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:b02cf04496f6576afffef5ddd04a0cb7d49cf6be16a9059d793a30b035f6b6ac"},
- {file = "cryptography-46.0.3-cp38-abi3-manylinux_2_34_ppc64le.whl", hash = "sha256:71e842ec9bc7abf543b47cf86b9a743baa95f4677d22baa4c7d5c69e49e9bc04"},
- {file = "cryptography-46.0.3-cp38-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:402b58fc32614f00980b66d6e56a5b4118e6cb362ae8f3fda141ba4689bd4506"},
- {file = "cryptography-46.0.3-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:ef639cb3372f69ec44915fafcd6698b6cc78fbe0c2ea41be867f6ed612811963"},
- {file = "cryptography-46.0.3-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:3b51b8ca4f1c6453d8829e1eb7299499ca7f313900dd4d89a24b8b87c0a780d4"},
- {file = "cryptography-46.0.3-cp38-abi3-win32.whl", hash = "sha256:6276eb85ef938dc035d59b87c8a7dc559a232f954962520137529d77b18ff1df"},
- {file = "cryptography-46.0.3-cp38-abi3-win_amd64.whl", hash = "sha256:416260257577718c05135c55958b674000baef9a1c7d9e8f306ec60d71db850f"},
- {file = "cryptography-46.0.3-cp38-abi3-win_arm64.whl", hash = "sha256:d89c3468de4cdc4f08a57e214384d0471911a3830fcdaf7a8cc587e42a866372"},
- {file = "cryptography-46.0.3-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:a23582810fedb8c0bc47524558fb6c56aac3fc252cb306072fd2815da2a47c32"},
- {file = "cryptography-46.0.3-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:e7aec276d68421f9574040c26e2a7c3771060bc0cff408bae1dcb19d3ab1e63c"},
- {file = "cryptography-46.0.3-pp311-pypy311_pp73-macosx_10_9_x86_64.whl", hash = "sha256:7ce938a99998ed3c8aa7e7272dca1a610401ede816d36d0693907d863b10d9ea"},
- {file = "cryptography-46.0.3-pp311-pypy311_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:191bb60a7be5e6f54e30ba16fdfae78ad3a342a0599eb4193ba88e3f3d6e185b"},
- {file = "cryptography-46.0.3-pp311-pypy311_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:c70cc23f12726be8f8bc72e41d5065d77e4515efae3690326764ea1b07845cfb"},
- {file = "cryptography-46.0.3-pp311-pypy311_pp73-manylinux_2_34_aarch64.whl", hash = "sha256:9394673a9f4de09e28b5356e7fff97d778f8abad85c9d5ac4a4b7e25a0de7717"},
- {file = "cryptography-46.0.3-pp311-pypy311_pp73-manylinux_2_34_x86_64.whl", hash = "sha256:94cd0549accc38d1494e1f8de71eca837d0509d0d44bf11d158524b0e12cebf9"},
- {file = "cryptography-46.0.3-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:6b5063083824e5509fdba180721d55909ffacccc8adbec85268b48439423d78c"},
- {file = "cryptography-46.0.3.tar.gz", hash = "sha256:a8b17438104fed022ce745b362294d9ce35b4c2e45c1d958ad4a4b019285f4a1"},
+ {file = "defusedxml-0.7.1-py2.py3-none-any.whl", hash = "sha256:a352e7e428770286cc899e2542b6cdaedb2b4953ff269a210103ec58f6198a61"},
+ {file = "defusedxml-0.7.1.tar.gz", hash = "sha256:1bb3032db185915b62d7c6209c5a8792be6a32ab2fedacc84e01b52c51aa3e69"},
]
-[package.dependencies]
-cffi = {version = ">=2.0.0", markers = "python_full_version >= \"3.9\" and platform_python_implementation != \"PyPy\""}
-typing-extensions = {version = ">=4.13.2", markers = "python_full_version < \"3.11\""}
-
-[package.extras]
-docs = ["sphinx (>=5.3.0)", "sphinx-inline-tabs", "sphinx-rtd-theme (>=3.0.0)"]
-docstest = ["pyenchant (>=3)", "readme-renderer (>=30.0)", "sphinxcontrib-spelling (>=7.3.1)"]
-nox = ["nox[uv] (>=2024.4.15)"]
-pep8test = ["check-sdist", "click (>=8.0.1)", "mypy (>=1.14)", "ruff (>=0.11.11)"]
-sdist = ["build (>=1.0.0)"]
-ssh = ["bcrypt (>=3.1.5)"]
-test = ["certifi (>=2024)", "cryptography-vectors (==46.0.3)", "pretend (>=0.7)", "pytest (>=7.4.0)", "pytest-benchmark (>=4.0)", "pytest-cov (>=2.10.1)", "pytest-xdist (>=3.5.0)"]
-test-randomorder = ["pytest-randomly"]
-
[[package]]
name = "desktop-entry-lib"
version = "5.0"
@@ -475,17 +276,6 @@ files = [
[package.extras]
xdg-desktop-portal = ["jeepney"]
-[[package]]
-name = "docutils"
-version = "0.22.3"
-description = "Docutils -- Python Documentation Utilities"
-optional = false
-python-versions = ">=3.9"
-files = [
- {file = "docutils-0.22.3-py3-none-any.whl", hash = "sha256:bd772e4aca73aff037958d44f2be5229ded4c09927fcf8690c577b66234d6ceb"},
- {file = "docutils-0.22.3.tar.gz", hash = "sha256:21486ae730e4ca9f622677b1412b879af1791efcfba517e4c6f60be543fc8cdd"},
-]
-
[[package]]
name = "exceptiongroup"
version = "1.3.1"
@@ -517,29 +307,6 @@ files = [
[package.extras]
all = ["flake8 (>=7.1.1)", "mypy (>=1.11.2)", "pytest (>=8.3.2)", "ruff (>=0.6.2)"]
-[[package]]
-name = "importlib-metadata"
-version = "8.7.0"
-description = "Read metadata from Python packages"
-optional = false
-python-versions = ">=3.9"
-files = [
- {file = "importlib_metadata-8.7.0-py3-none-any.whl", hash = "sha256:e5dd1551894c77868a30651cef00984d50e1002d06942a7101d34870c5f02afd"},
- {file = "importlib_metadata-8.7.0.tar.gz", hash = "sha256:d13b81ad223b890aa16c5471f2ac3056cf76c5f10f82d6f9292f0b415f389000"},
-]
-
-[package.dependencies]
-zipp = ">=3.20"
-
-[package.extras]
-check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1)"]
-cover = ["pytest-cov"]
-doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"]
-enabler = ["pytest-enabler (>=2.2)"]
-perf = ["ipython"]
-test = ["flufl.flake8", "importlib_resources (>=1.3)", "jaraco.test (>=5.4)", "packaging", "pyfakefs", "pytest (>=6,!=8.1.*)", "pytest-perf (>=0.9.2)"]
-type = ["pytest-mypy"]
-
[[package]]
name = "iniconfig"
version = "2.3.0"
@@ -552,185 +319,118 @@ files = [
]
[[package]]
-name = "jaraco-classes"
-version = "3.4.0"
-description = "Utility functions for Python class constructs"
-optional = false
-python-versions = ">=3.8"
-files = [
- {file = "jaraco.classes-3.4.0-py3-none-any.whl", hash = "sha256:f662826b6bed8cace05e7ff873ce0f9283b5c924470fe664fff1c2f00f581790"},
- {file = "jaraco.classes-3.4.0.tar.gz", hash = "sha256:47a024b51d0239c0dd8c8540c6c7f484be3b8fcf0b2d85c13825780d3b3f3acd"},
-]
-
-[package.dependencies]
-more-itertools = "*"
-
-[package.extras]
-docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"]
-testing = ["pytest (>=6)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy", "pytest-ruff (>=0.2.1)"]
-
-[[package]]
-name = "jaraco-context"
-version = "6.0.1"
-description = "Useful decorators and context managers"
-optional = false
-python-versions = ">=3.8"
-files = [
- {file = "jaraco.context-6.0.1-py3-none-any.whl", hash = "sha256:f797fc481b490edb305122c9181830a3a5b76d84ef6d1aef2fb9b47ab956f9e4"},
- {file = "jaraco_context-6.0.1.tar.gz", hash = "sha256:9bae4ea555cf0b14938dc0aee7c9f32ed303aa20a3b73e7dc80111628792d1b3"},
-]
-
-[package.dependencies]
-"backports.tarfile" = {version = "*", markers = "python_version < \"3.12\""}
-
-[package.extras]
-doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"]
-test = ["portend", "pytest (>=6,!=8.1.*)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy", "pytest-ruff (>=0.2.1)"]
-
-[[package]]
-name = "jaraco-functools"
-version = "4.3.0"
-description = "Functools like those found in stdlib"
-optional = false
-python-versions = ">=3.9"
-files = [
- {file = "jaraco_functools-4.3.0-py3-none-any.whl", hash = "sha256:227ff8ed6f7b8f62c56deff101545fa7543cf2c8e7b82a7c2116e672f29c26e8"},
- {file = "jaraco_functools-4.3.0.tar.gz", hash = "sha256:cfd13ad0dd2c47a3600b439ef72d8615d482cedcff1632930d6f28924d92f294"},
-]
-
-[package.dependencies]
-more_itertools = "*"
-
-[package.extras]
-check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1)"]
-cover = ["pytest-cov"]
-doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"]
-enabler = ["pytest-enabler (>=2.2)"]
-test = ["jaraco.classes", "pytest (>=6,!=8.1.*)"]
-type = ["pytest-mypy"]
-
-[[package]]
-name = "jeepney"
-version = "0.9.0"
-description = "Low-level, pure Python DBus protocol wrapper."
+name = "jinja2"
+version = "3.1.6"
+description = "A very fast and expressive template engine."
optional = false
python-versions = ">=3.7"
files = [
- {file = "jeepney-0.9.0-py3-none-any.whl", hash = "sha256:97e5714520c16fc0a45695e5365a2e11b81ea79bba796e26f9f1d178cb182683"},
- {file = "jeepney-0.9.0.tar.gz", hash = "sha256:cf0e9e845622b81e4a28df94c40345400256ec608d0e55bb8a3feaa9163f5732"},
-]
-
-[package.extras]
-test = ["async-timeout", "pytest", "pytest-asyncio (>=0.17)", "pytest-trio", "testpath", "trio"]
-trio = ["trio"]
-
-[[package]]
-name = "keyring"
-version = "25.7.0"
-description = "Store and access your passwords safely."
-optional = false
-python-versions = ">=3.9"
-files = [
- {file = "keyring-25.7.0-py3-none-any.whl", hash = "sha256:be4a0b195f149690c166e850609a477c532ddbfbaed96a404d4e43f8d5e2689f"},
- {file = "keyring-25.7.0.tar.gz", hash = "sha256:fe01bd85eb3f8fb3dd0405defdeac9a5b4f6f0439edbb3149577f244a2e8245b"},
+ {file = "jinja2-3.1.6-py3-none-any.whl", hash = "sha256:85ece4451f492d0c13c5dd7c13a64681a86afae63a5f347908daf103ce6d2f67"},
+ {file = "jinja2-3.1.6.tar.gz", hash = "sha256:0137fb05990d35f1275a587e9aee6d56da821fc83491a0fb838183be43f66d6d"},
]
[package.dependencies]
-importlib_metadata = {version = ">=4.11.4", markers = "python_version < \"3.12\""}
-"jaraco.classes" = "*"
-"jaraco.context" = "*"
-"jaraco.functools" = "*"
-jeepney = {version = ">=0.4.2", markers = "sys_platform == \"linux\""}
-pywin32-ctypes = {version = ">=0.2.0", markers = "sys_platform == \"win32\""}
-SecretStorage = {version = ">=3.2", markers = "sys_platform == \"linux\""}
+MarkupSafe = ">=2.0"
[package.extras]
-check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1)"]
-completion = ["shtab (>=1.1.0)"]
-cover = ["pytest-cov"]
-doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"]
-enabler = ["pytest-enabler (>=3.4)"]
-test = ["pyfakefs", "pytest (>=6,!=8.1.*)"]
-type = ["pygobject-stubs", "pytest-mypy (>=1.0.1)", "shtab", "types-pywin32"]
+i18n = ["Babel (>=2.7)"]
[[package]]
-name = "markdown-it-py"
-version = "4.0.0"
-description = "Python port of markdown-it. Markdown parsing, done right!"
-optional = false
-python-versions = ">=3.10"
-files = [
- {file = "markdown_it_py-4.0.0-py3-none-any.whl", hash = "sha256:87327c59b172c5011896038353a81343b6754500a08cd7a4973bb48c6d578147"},
- {file = "markdown_it_py-4.0.0.tar.gz", hash = "sha256:cb0a2b4aa34f932c007117b194e945bd74e0ec24133ceb5bac59009cda1cb9f3"},
-]
-
-[package.dependencies]
-mdurl = ">=0.1,<1.0"
-
-[package.extras]
-benchmarking = ["psutil", "pytest", "pytest-benchmark"]
-compare = ["commonmark (>=0.9,<1.0)", "markdown (>=3.4,<4.0)", "markdown-it-pyrs", "mistletoe (>=1.0,<2.0)", "mistune (>=3.0,<4.0)", "panflute (>=2.3,<3.0)"]
-linkify = ["linkify-it-py (>=1,<3)"]
-plugins = ["mdit-py-plugins (>=0.5.0)"]
-profiling = ["gprof2dot"]
-rtd = ["ipykernel", "jupyter_sphinx", "mdit-py-plugins (>=0.5.0)", "myst-parser", "pyyaml", "sphinx", "sphinx-book-theme (>=1.0,<2.0)", "sphinx-copybutton", "sphinx-design"]
-testing = ["coverage", "pytest", "pytest-cov", "pytest-regressions", "requests"]
-
-[[package]]
-name = "mdurl"
-version = "0.1.2"
-description = "Markdown URL utilities"
-optional = false
-python-versions = ">=3.7"
-files = [
- {file = "mdurl-0.1.2-py3-none-any.whl", hash = "sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8"},
- {file = "mdurl-0.1.2.tar.gz", hash = "sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba"},
-]
-
-[[package]]
-name = "more-itertools"
-version = "10.8.0"
-description = "More routines for operating on iterables, beyond itertools"
+name = "markupsafe"
+version = "3.0.3"
+description = "Safely add untrusted strings to HTML/XML markup."
optional = false
python-versions = ">=3.9"
files = [
- {file = "more_itertools-10.8.0-py3-none-any.whl", hash = "sha256:52d4362373dcf7c52546bc4af9a86ee7c4579df9a8dc268be0a2f949d376cc9b"},
- {file = "more_itertools-10.8.0.tar.gz", hash = "sha256:f638ddf8a1a0d134181275fb5d58b086ead7c6a72429ad725c67503f13ba30bd"},
-]
-
-[[package]]
-name = "nh3"
-version = "0.3.2"
-description = "Python binding to Ammonia HTML sanitizer Rust crate"
-optional = false
-python-versions = ">=3.8"
-files = [
- {file = "nh3-0.3.2-cp314-cp314t-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:d18957a90806d943d141cc5e4a0fefa1d77cf0d7a156878bf9a66eed52c9cc7d"},
- {file = "nh3-0.3.2-cp314-cp314t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:45c953e57028c31d473d6b648552d9cab1efe20a42ad139d78e11d8f42a36130"},
- {file = "nh3-0.3.2-cp314-cp314t-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2c9850041b77a9147d6bbd6dbbf13eeec7009eb60b44e83f07fcb2910075bf9b"},
- {file = "nh3-0.3.2-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:403c11563e50b915d0efdb622866d1d9e4506bce590ef7da57789bf71dd148b5"},
- {file = "nh3-0.3.2-cp314-cp314t-musllinux_1_2_armv7l.whl", hash = "sha256:0dca4365db62b2d71ff1620ee4f800c4729849906c5dd504ee1a7b2389558e31"},
- {file = "nh3-0.3.2-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:0fe7ee035dd7b2290715baf29cb27167dddd2ff70ea7d052c958dbd80d323c99"},
- {file = "nh3-0.3.2-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:a40202fd58e49129764f025bbaae77028e420f1d5b3c8e6f6fd3a6490d513868"},
- {file = "nh3-0.3.2-cp314-cp314t-win32.whl", hash = "sha256:1f9ba555a797dbdcd844b89523f29cdc90973d8bd2e836ea6b962cf567cadd93"},
- {file = "nh3-0.3.2-cp314-cp314t-win_amd64.whl", hash = "sha256:dce4248edc427c9b79261f3e6e2b3ecbdd9b88c267012168b4a7b3fc6fd41d13"},
- {file = "nh3-0.3.2-cp314-cp314t-win_arm64.whl", hash = "sha256:019ecbd007536b67fdf76fab411b648fb64e2257ca3262ec80c3425c24028c80"},
- {file = "nh3-0.3.2-cp38-abi3-macosx_10_12_x86_64.macosx_11_0_arm64.macosx_10_12_universal2.whl", hash = "sha256:7064ccf5ace75825bd7bf57859daaaf16ed28660c1c6b306b649a9eda4b54b1e"},
- {file = "nh3-0.3.2-cp38-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c8745454cdd28bbbc90861b80a0111a195b0e3961b9fa2e672be89eb199fa5d8"},
- {file = "nh3-0.3.2-cp38-abi3-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:72d67c25a84579f4a432c065e8b4274e53b7cf1df8f792cf846abfe2c3090866"},
- {file = "nh3-0.3.2-cp38-abi3-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:13398e676a14d6233f372c75f52d5ae74f98210172991f7a3142a736bd92b131"},
- {file = "nh3-0.3.2-cp38-abi3-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:03d617e5c8aa7331bd2659c654e021caf9bba704b109e7b2b28b039a00949fe5"},
- {file = "nh3-0.3.2-cp38-abi3-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f2f55c4d2d5a207e74eefe4d828067bbb01300e06e2a7436142f915c5928de07"},
- {file = "nh3-0.3.2-cp38-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7bb18403f02b655a1bbe4e3a4696c2ae1d6ae8f5991f7cacb684b1ae27e6c9f7"},
- {file = "nh3-0.3.2-cp38-abi3-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:6d66f41672eb4060cf87c037f760bdbc6847852ca9ef8e9c5a5da18f090abf87"},
- {file = "nh3-0.3.2-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:f97f8b25cb2681d25e2338148159447e4d689aafdccfcf19e61ff7db3905768a"},
- {file = "nh3-0.3.2-cp38-abi3-musllinux_1_2_armv7l.whl", hash = "sha256:2ab70e8c6c7d2ce953d2a58102eefa90c2d0a5ed7aa40c7e29a487bc5e613131"},
- {file = "nh3-0.3.2-cp38-abi3-musllinux_1_2_i686.whl", hash = "sha256:1710f3901cd6440ca92494ba2eb6dc260f829fa8d9196b659fa10de825610ce0"},
- {file = "nh3-0.3.2-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:91e9b001101fb4500a2aafe3e7c92928d85242d38bf5ac0aba0b7480da0a4cd6"},
- {file = "nh3-0.3.2-cp38-abi3-win32.whl", hash = "sha256:169db03df90da63286e0560ea0efa9b6f3b59844a9735514a1d47e6bb2c8c61b"},
- {file = "nh3-0.3.2-cp38-abi3-win_amd64.whl", hash = "sha256:562da3dca7a17f9077593214a9781a94b8d76de4f158f8c895e62f09573945fe"},
- {file = "nh3-0.3.2-cp38-abi3-win_arm64.whl", hash = "sha256:cf5964d54edd405e68583114a7cba929468bcd7db5e676ae38ee954de1cfc104"},
- {file = "nh3-0.3.2.tar.gz", hash = "sha256:f394759a06df8b685a4ebfb1874fb67a9cbfd58c64fc5ed587a663c0e63ec376"},
+ {file = "markupsafe-3.0.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:2f981d352f04553a7171b8e44369f2af4055f888dfb147d55e42d29e29e74559"},
+ {file = "markupsafe-3.0.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e1c1493fb6e50ab01d20a22826e57520f1284df32f2d8601fdd90b6304601419"},
+ {file = "markupsafe-3.0.3-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1ba88449deb3de88bd40044603fafffb7bc2b055d626a330323a9ed736661695"},
+ {file = "markupsafe-3.0.3-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:f42d0984e947b8adf7dd6dde396e720934d12c506ce84eea8476409563607591"},
+ {file = "markupsafe-3.0.3-cp310-cp310-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:c0c0b3ade1c0b13b936d7970b1d37a57acde9199dc2aecc4c336773e1d86049c"},
+ {file = "markupsafe-3.0.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:0303439a41979d9e74d18ff5e2dd8c43ed6c6001fd40e5bf2e43f7bd9bbc523f"},
+ {file = "markupsafe-3.0.3-cp310-cp310-musllinux_1_2_riscv64.whl", hash = "sha256:d2ee202e79d8ed691ceebae8e0486bd9a2cd4794cec4824e1c99b6f5009502f6"},
+ {file = "markupsafe-3.0.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:177b5253b2834fe3678cb4a5f0059808258584c559193998be2601324fdeafb1"},
+ {file = "markupsafe-3.0.3-cp310-cp310-win32.whl", hash = "sha256:2a15a08b17dd94c53a1da0438822d70ebcd13f8c3a95abe3a9ef9f11a94830aa"},
+ {file = "markupsafe-3.0.3-cp310-cp310-win_amd64.whl", hash = "sha256:c4ffb7ebf07cfe8931028e3e4c85f0357459a3f9f9490886198848f4fa002ec8"},
+ {file = "markupsafe-3.0.3-cp310-cp310-win_arm64.whl", hash = "sha256:e2103a929dfa2fcaf9bb4e7c091983a49c9ac3b19c9061b6d5427dd7d14d81a1"},
+ {file = "markupsafe-3.0.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:1cc7ea17a6824959616c525620e387f6dd30fec8cb44f649e31712db02123dad"},
+ {file = "markupsafe-3.0.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4bd4cd07944443f5a265608cc6aab442e4f74dff8088b0dfc8238647b8f6ae9a"},
+ {file = "markupsafe-3.0.3-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6b5420a1d9450023228968e7e6a9ce57f65d148ab56d2313fcd589eee96a7a50"},
+ {file = "markupsafe-3.0.3-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0bf2a864d67e76e5c9a34dc26ec616a66b9888e25e7b9460e1c76d3293bd9dbf"},
+ {file = "markupsafe-3.0.3-cp311-cp311-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:bc51efed119bc9cfdf792cdeaa4d67e8f6fcccab66ed4bfdd6bde3e59bfcbb2f"},
+ {file = "markupsafe-3.0.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:068f375c472b3e7acbe2d5318dea141359e6900156b5b2ba06a30b169086b91a"},
+ {file = "markupsafe-3.0.3-cp311-cp311-musllinux_1_2_riscv64.whl", hash = "sha256:7be7b61bb172e1ed687f1754f8e7484f1c8019780f6f6b0786e76bb01c2ae115"},
+ {file = "markupsafe-3.0.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:f9e130248f4462aaa8e2552d547f36ddadbeaa573879158d721bbd33dfe4743a"},
+ {file = "markupsafe-3.0.3-cp311-cp311-win32.whl", hash = "sha256:0db14f5dafddbb6d9208827849fad01f1a2609380add406671a26386cdf15a19"},
+ {file = "markupsafe-3.0.3-cp311-cp311-win_amd64.whl", hash = "sha256:de8a88e63464af587c950061a5e6a67d3632e36df62b986892331d4620a35c01"},
+ {file = "markupsafe-3.0.3-cp311-cp311-win_arm64.whl", hash = "sha256:3b562dd9e9ea93f13d53989d23a7e775fdfd1066c33494ff43f5418bc8c58a5c"},
+ {file = "markupsafe-3.0.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:d53197da72cc091b024dd97249dfc7794d6a56530370992a5e1a08983ad9230e"},
+ {file = "markupsafe-3.0.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:1872df69a4de6aead3491198eaf13810b565bdbeec3ae2dc8780f14458ec73ce"},
+ {file = "markupsafe-3.0.3-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:3a7e8ae81ae39e62a41ec302f972ba6ae23a5c5396c8e60113e9066ef893da0d"},
+ {file = "markupsafe-3.0.3-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d6dd0be5b5b189d31db7cda48b91d7e0a9795f31430b7f271219ab30f1d3ac9d"},
+ {file = "markupsafe-3.0.3-cp312-cp312-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:94c6f0bb423f739146aec64595853541634bde58b2135f27f61c1ffd1cd4d16a"},
+ {file = "markupsafe-3.0.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:be8813b57049a7dc738189df53d69395eba14fb99345e0a5994914a3864c8a4b"},
+ {file = "markupsafe-3.0.3-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:83891d0e9fb81a825d9a6d61e3f07550ca70a076484292a70fde82c4b807286f"},
+ {file = "markupsafe-3.0.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:77f0643abe7495da77fb436f50f8dab76dbc6e5fd25d39589a0f1fe6548bfa2b"},
+ {file = "markupsafe-3.0.3-cp312-cp312-win32.whl", hash = "sha256:d88b440e37a16e651bda4c7c2b930eb586fd15ca7406cb39e211fcff3bf3017d"},
+ {file = "markupsafe-3.0.3-cp312-cp312-win_amd64.whl", hash = "sha256:26a5784ded40c9e318cfc2bdb30fe164bdb8665ded9cd64d500a34fb42067b1c"},
+ {file = "markupsafe-3.0.3-cp312-cp312-win_arm64.whl", hash = "sha256:35add3b638a5d900e807944a078b51922212fb3dedb01633a8defc4b01a3c85f"},
+ {file = "markupsafe-3.0.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:e1cf1972137e83c5d4c136c43ced9ac51d0e124706ee1c8aa8532c1287fa8795"},
+ {file = "markupsafe-3.0.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:116bb52f642a37c115f517494ea5feb03889e04df47eeff5b130b1808ce7c219"},
+ {file = "markupsafe-3.0.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:133a43e73a802c5562be9bbcd03d090aa5a1fe899db609c29e8c8d815c5f6de6"},
+ {file = "markupsafe-3.0.3-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ccfcd093f13f0f0b7fdd0f198b90053bf7b2f02a3927a30e63f3ccc9df56b676"},
+ {file = "markupsafe-3.0.3-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:509fa21c6deb7a7a273d629cf5ec029bc209d1a51178615ddf718f5918992ab9"},
+ {file = "markupsafe-3.0.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:a4afe79fb3de0b7097d81da19090f4df4f8d3a2b3adaa8764138aac2e44f3af1"},
+ {file = "markupsafe-3.0.3-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:795e7751525cae078558e679d646ae45574b47ed6e7771863fcc079a6171a0fc"},
+ {file = "markupsafe-3.0.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:8485f406a96febb5140bfeca44a73e3ce5116b2501ac54fe953e488fb1d03b12"},
+ {file = "markupsafe-3.0.3-cp313-cp313-win32.whl", hash = "sha256:bdd37121970bfd8be76c5fb069c7751683bdf373db1ed6c010162b2a130248ed"},
+ {file = "markupsafe-3.0.3-cp313-cp313-win_amd64.whl", hash = "sha256:9a1abfdc021a164803f4d485104931fb8f8c1efd55bc6b748d2f5774e78b62c5"},
+ {file = "markupsafe-3.0.3-cp313-cp313-win_arm64.whl", hash = "sha256:7e68f88e5b8799aa49c85cd116c932a1ac15caaa3f5db09087854d218359e485"},
+ {file = "markupsafe-3.0.3-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:218551f6df4868a8d527e3062d0fb968682fe92054e89978594c28e642c43a73"},
+ {file = "markupsafe-3.0.3-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:3524b778fe5cfb3452a09d31e7b5adefeea8c5be1d43c4f810ba09f2ceb29d37"},
+ {file = "markupsafe-3.0.3-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4e885a3d1efa2eadc93c894a21770e4bc67899e3543680313b09f139e149ab19"},
+ {file = "markupsafe-3.0.3-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8709b08f4a89aa7586de0aadc8da56180242ee0ada3999749b183aa23df95025"},
+ {file = "markupsafe-3.0.3-cp313-cp313t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:b8512a91625c9b3da6f127803b166b629725e68af71f8184ae7e7d54686a56d6"},
+ {file = "markupsafe-3.0.3-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:9b79b7a16f7fedff2495d684f2b59b0457c3b493778c9eed31111be64d58279f"},
+ {file = "markupsafe-3.0.3-cp313-cp313t-musllinux_1_2_riscv64.whl", hash = "sha256:12c63dfb4a98206f045aa9563db46507995f7ef6d83b2f68eda65c307c6829eb"},
+ {file = "markupsafe-3.0.3-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:8f71bc33915be5186016f675cd83a1e08523649b0e33efdb898db577ef5bb009"},
+ {file = "markupsafe-3.0.3-cp313-cp313t-win32.whl", hash = "sha256:69c0b73548bc525c8cb9a251cddf1931d1db4d2258e9599c28c07ef3580ef354"},
+ {file = "markupsafe-3.0.3-cp313-cp313t-win_amd64.whl", hash = "sha256:1b4b79e8ebf6b55351f0d91fe80f893b4743f104bff22e90697db1590e47a218"},
+ {file = "markupsafe-3.0.3-cp313-cp313t-win_arm64.whl", hash = "sha256:ad2cf8aa28b8c020ab2fc8287b0f823d0a7d8630784c31e9ee5edea20f406287"},
+ {file = "markupsafe-3.0.3-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:eaa9599de571d72e2daf60164784109f19978b327a3910d3e9de8c97b5b70cfe"},
+ {file = "markupsafe-3.0.3-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:c47a551199eb8eb2121d4f0f15ae0f923d31350ab9280078d1e5f12b249e0026"},
+ {file = "markupsafe-3.0.3-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f34c41761022dd093b4b6896d4810782ffbabe30f2d443ff5f083e0cbbb8c737"},
+ {file = "markupsafe-3.0.3-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:457a69a9577064c05a97c41f4e65148652db078a3a509039e64d3467b9e7ef97"},
+ {file = "markupsafe-3.0.3-cp314-cp314-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:e8afc3f2ccfa24215f8cb28dcf43f0113ac3c37c2f0f0806d8c70e4228c5cf4d"},
+ {file = "markupsafe-3.0.3-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:ec15a59cf5af7be74194f7ab02d0f59a62bdcf1a537677ce67a2537c9b87fcda"},
+ {file = "markupsafe-3.0.3-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:0eb9ff8191e8498cca014656ae6b8d61f39da5f95b488805da4bb029cccbfbaf"},
+ {file = "markupsafe-3.0.3-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:2713baf880df847f2bece4230d4d094280f4e67b1e813eec43b4c0e144a34ffe"},
+ {file = "markupsafe-3.0.3-cp314-cp314-win32.whl", hash = "sha256:729586769a26dbceff69f7a7dbbf59ab6572b99d94576a5592625d5b411576b9"},
+ {file = "markupsafe-3.0.3-cp314-cp314-win_amd64.whl", hash = "sha256:bdc919ead48f234740ad807933cdf545180bfbe9342c2bb451556db2ed958581"},
+ {file = "markupsafe-3.0.3-cp314-cp314-win_arm64.whl", hash = "sha256:5a7d5dc5140555cf21a6fefbdbf8723f06fcd2f63ef108f2854de715e4422cb4"},
+ {file = "markupsafe-3.0.3-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:1353ef0c1b138e1907ae78e2f6c63ff67501122006b0f9abad68fda5f4ffc6ab"},
+ {file = "markupsafe-3.0.3-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:1085e7fbddd3be5f89cc898938f42c0b3c711fdcb37d75221de2666af647c175"},
+ {file = "markupsafe-3.0.3-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1b52b4fb9df4eb9ae465f8d0c228a00624de2334f216f178a995ccdcf82c4634"},
+ {file = "markupsafe-3.0.3-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:fed51ac40f757d41b7c48425901843666a6677e3e8eb0abcff09e4ba6e664f50"},
+ {file = "markupsafe-3.0.3-cp314-cp314t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:f190daf01f13c72eac4efd5c430a8de82489d9cff23c364c3ea822545032993e"},
+ {file = "markupsafe-3.0.3-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:e56b7d45a839a697b5eb268c82a71bd8c7f6c94d6fd50c3d577fa39a9f1409f5"},
+ {file = "markupsafe-3.0.3-cp314-cp314t-musllinux_1_2_riscv64.whl", hash = "sha256:f3e98bb3798ead92273dc0e5fd0f31ade220f59a266ffd8a4f6065e0a3ce0523"},
+ {file = "markupsafe-3.0.3-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:5678211cb9333a6468fb8d8be0305520aa073f50d17f089b5b4b477ea6e67fdc"},
+ {file = "markupsafe-3.0.3-cp314-cp314t-win32.whl", hash = "sha256:915c04ba3851909ce68ccc2b8e2cd691618c4dc4c4232fb7982bca3f41fd8c3d"},
+ {file = "markupsafe-3.0.3-cp314-cp314t-win_amd64.whl", hash = "sha256:4faffd047e07c38848ce017e8725090413cd80cbc23d86e55c587bf979e579c9"},
+ {file = "markupsafe-3.0.3-cp314-cp314t-win_arm64.whl", hash = "sha256:32001d6a8fc98c8cb5c947787c5d08b0a50663d139f1305bac5885d98d9b40fa"},
+ {file = "markupsafe-3.0.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:15d939a21d546304880945ca1ecb8a039db6b4dc49b2c5a400387cdae6a62e26"},
+ {file = "markupsafe-3.0.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f71a396b3bf33ecaa1626c255855702aca4d3d9fea5e051b41ac59a9c1c41edc"},
+ {file = "markupsafe-3.0.3-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0f4b68347f8c5eab4a13419215bdfd7f8c9b19f2b25520968adfad23eb0ce60c"},
+ {file = "markupsafe-3.0.3-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:e8fc20152abba6b83724d7ff268c249fa196d8259ff481f3b1476383f8f24e42"},
+ {file = "markupsafe-3.0.3-cp39-cp39-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:949b8d66bc381ee8b007cd945914c721d9aba8e27f71959d750a46f7c282b20b"},
+ {file = "markupsafe-3.0.3-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:3537e01efc9d4dccdf77221fb1cb3b8e1a38d5428920e0657ce299b20324d758"},
+ {file = "markupsafe-3.0.3-cp39-cp39-musllinux_1_2_riscv64.whl", hash = "sha256:591ae9f2a647529ca990bc681daebdd52c8791ff06c2bfa05b65163e28102ef2"},
+ {file = "markupsafe-3.0.3-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:a320721ab5a1aba0a233739394eb907f8c8da5c98c9181d1161e77a0c8e36f2d"},
+ {file = "markupsafe-3.0.3-cp39-cp39-win32.whl", hash = "sha256:df2449253ef108a379b8b5d6b43f4b1a8e81a061d6537becd5582fba5f9196d7"},
+ {file = "markupsafe-3.0.3-cp39-cp39-win_amd64.whl", hash = "sha256:7c3fb7d25180895632e5d3148dbdc29ea38ccb7fd210aa27acbd1201a1902c6e"},
+ {file = "markupsafe-3.0.3-cp39-cp39-win_arm64.whl", hash = "sha256:38664109c14ffc9e7437e86b4dceb442b0096dfe3541d7864d9cbe1da4cf36c8"},
+ {file = "markupsafe-3.0.3.tar.gz", hash = "sha256:722695808f4b6457b320fdc131280796bdceb04ab50fe1795cd540799ebe1698"},
]
[[package]]
@@ -744,20 +444,6 @@ files = [
{file = "packaging-25.0.tar.gz", hash = "sha256:d443872c98d677bf60f6a1f2f8c1cb748e8fe762d2bf9d3148b5599295b0fc4f"},
]
-[[package]]
-name = "pkginfo"
-version = "1.10.0"
-description = "Query metadata from sdists / bdists / installed packages."
-optional = false
-python-versions = ">=3.6"
-files = [
- {file = "pkginfo-1.10.0-py3-none-any.whl", hash = "sha256:889a6da2ed7ffc58ab5b900d888ddce90bce912f2d2de1dc1c26f4cb9fe65097"},
- {file = "pkginfo-1.10.0.tar.gz", hash = "sha256:5df73835398d10db79f8eecd5cd86b1f6d29317589ea70796994d49399af6297"},
-]
-
-[package.extras]
-testing = ["pytest", "pytest-cov", "wheel"]
-
[[package]]
name = "pluggy"
version = "1.6.0"
@@ -773,17 +459,6 @@ files = [
dev = ["pre-commit", "tox"]
testing = ["coverage", "pytest", "pytest-benchmark"]
-[[package]]
-name = "pycparser"
-version = "2.23"
-description = "C parser in Python"
-optional = false
-python-versions = ">=3.8"
-files = [
- {file = "pycparser-2.23-py3-none-any.whl", hash = "sha256:e5c6e8d3fbad53479cab09ac03729e0a9faf2bee3db8208a550daf5af81a5934"},
- {file = "pycparser-2.23.tar.gz", hash = "sha256:78816d4f24add8f10a06d6f05b4d424ad9e96cfebf68a4ddc99c65c0720d00c2"},
-]
-
[[package]]
name = "pygments"
version = "2.19.2"
@@ -814,48 +489,38 @@ desktop-entry-lib = "*"
requests = "*"
tomli = {version = "*", markers = "python_version < \"3.11\""}
-[[package]]
-name = "pyproject-hooks"
-version = "1.2.0"
-description = "Wrappers to call pyproject.toml-based build backend hooks."
-optional = false
-python-versions = ">=3.7"
-files = [
- {file = "pyproject_hooks-1.2.0-py3-none-any.whl", hash = "sha256:9e5c6bfa8dcc30091c74b0cf803c81fdd29d94f01992a7707bc97babb1141913"},
- {file = "pyproject_hooks-1.2.0.tar.gz", hash = "sha256:1e859bd5c40fae9448642dd871adf459e5e2084186e8d2c2a79a824c970da1f8"},
-]
-
[[package]]
name = "pytest"
-version = "7.4.4"
+version = "8.4.2"
description = "pytest: simple powerful testing with Python"
optional = false
-python-versions = ">=3.7"
+python-versions = ">=3.9"
files = [
- {file = "pytest-7.4.4-py3-none-any.whl", hash = "sha256:b090cdf5ed60bf4c45261be03239c2c1c22df034fbffe691abe93cd80cea01d8"},
- {file = "pytest-7.4.4.tar.gz", hash = "sha256:2cf0005922c6ace4a3e2ec8b4080eb0d9753fdc93107415332f50ce9e7994280"},
+ {file = "pytest-8.4.2-py3-none-any.whl", hash = "sha256:872f880de3fc3a5bdc88a11b39c9710c3497a547cfa9320bc3c5e62fbf272e79"},
+ {file = "pytest-8.4.2.tar.gz", hash = "sha256:86c0d0b93306b961d58d62a4db4879f27fe25513d4b969df351abdddb3c30e01"},
]
[package.dependencies]
-colorama = {version = "*", markers = "sys_platform == \"win32\""}
-exceptiongroup = {version = ">=1.0.0rc8", markers = "python_version < \"3.11\""}
-iniconfig = "*"
-packaging = "*"
-pluggy = ">=0.12,<2.0"
-tomli = {version = ">=1.0.0", markers = "python_version < \"3.11\""}
+colorama = {version = ">=0.4", markers = "sys_platform == \"win32\""}
+exceptiongroup = {version = ">=1", markers = "python_version < \"3.11\""}
+iniconfig = ">=1"
+packaging = ">=20"
+pluggy = ">=1.5,<2"
+pygments = ">=2.7.2"
+tomli = {version = ">=1", markers = "python_version < \"3.11\""}
[package.extras]
-testing = ["argcomplete", "attrs (>=19.2.0)", "hypothesis (>=3.56)", "mock", "nose", "pygments (>=2.7.2)", "requests", "setuptools", "xmlschema"]
+dev = ["argcomplete", "attrs (>=19.2)", "hypothesis (>=3.56)", "mock", "requests", "setuptools", "xmlschema"]
[[package]]
name = "pytest-cov"
-version = "4.1.0"
+version = "5.0.0"
description = "Pytest plugin for measuring coverage."
optional = false
-python-versions = ">=3.7"
+python-versions = ">=3.8"
files = [
- {file = "pytest-cov-4.1.0.tar.gz", hash = "sha256:3904b13dfbfec47f003b8e77fd5b589cd11904a21ddf1ab38a64f204d6a10ef6"},
- {file = "pytest_cov-4.1.0-py3-none-any.whl", hash = "sha256:6ba70b9e97e69fcc3fb45bfeab2d0a138fb65c4d0d6a41ef33983ad114be8c3a"},
+ {file = "pytest-cov-5.0.0.tar.gz", hash = "sha256:5837b58e9f6ebd335b0f8060eecce69b662415b16dc503883a02f45dfeb14857"},
+ {file = "pytest_cov-5.0.0-py3-none-any.whl", hash = "sha256:4f0764a1219df53214206bf1feea4633c3b558a2925c8b59f144f682861ce652"},
]
[package.dependencies]
@@ -863,18 +528,7 @@ coverage = {version = ">=5.2.1", extras = ["toml"]}
pytest = ">=4.6"
[package.extras]
-testing = ["fields", "hunter", "process-tests", "pytest-xdist", "six", "virtualenv"]
-
-[[package]]
-name = "pywin32-ctypes"
-version = "0.2.3"
-description = "A (partial) reimplementation of pywin32 using ctypes/cffi"
-optional = false
-python-versions = ">=3.6"
-files = [
- {file = "pywin32-ctypes-0.2.3.tar.gz", hash = "sha256:d162dc04946d704503b2edc4d55f3dba5c1d539ead017afa00142c38b9885755"},
- {file = "pywin32_ctypes-0.2.3-py3-none-any.whl", hash = "sha256:8a1513379d709975552d202d942d9837758905c8d01eb82b8bcc30918929e7b8"},
-]
+testing = ["fields", "hunter", "process-tests", "pytest-xdist", "virtualenv"]
[[package]]
name = "pyyaml"
@@ -958,25 +612,6 @@ files = [
{file = "pyyaml-6.0.3.tar.gz", hash = "sha256:d76623373421df22fb4cf8817020cbb7ef15c725b9d5e45f17e189bfc384190f"},
]
-[[package]]
-name = "readme-renderer"
-version = "44.0"
-description = "readme_renderer is a library for rendering readme descriptions for Warehouse"
-optional = false
-python-versions = ">=3.9"
-files = [
- {file = "readme_renderer-44.0-py3-none-any.whl", hash = "sha256:2fbca89b81a08526aadf1357a8c2ae889ec05fb03f5da67f9769c9a592166151"},
- {file = "readme_renderer-44.0.tar.gz", hash = "sha256:8712034eabbfa6805cacf1402b4eeb2a73028f72d1166d6f5cb7f9c047c5d1e1"},
-]
-
-[package.dependencies]
-docutils = ">=0.21.2"
-nh3 = ">=0.2.14"
-Pygments = ">=2.5.1"
-
-[package.extras]
-md = ["cmarkgfm (>=0.8.0)"]
-
[[package]]
name = "requests"
version = "2.32.5"
@@ -998,67 +633,6 @@ urllib3 = ">=1.21.1,<3"
socks = ["PySocks (>=1.5.6,!=1.5.7)"]
use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"]
-[[package]]
-name = "requests-toolbelt"
-version = "1.0.0"
-description = "A utility belt for advanced users of python-requests"
-optional = false
-python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
-files = [
- {file = "requests-toolbelt-1.0.0.tar.gz", hash = "sha256:7681a0a3d047012b5bdc0ee37d7f8f07ebe76ab08caeccfc3921ce23c88d5bc6"},
- {file = "requests_toolbelt-1.0.0-py2.py3-none-any.whl", hash = "sha256:cccfdd665f0a24fcf4726e690f65639d272bb0637b9b92dfd91a5568ccf6bd06"},
-]
-
-[package.dependencies]
-requests = ">=2.0.1,<3.0.0"
-
-[[package]]
-name = "rfc3986"
-version = "2.0.0"
-description = "Validating URI References per RFC 3986"
-optional = false
-python-versions = ">=3.7"
-files = [
- {file = "rfc3986-2.0.0-py2.py3-none-any.whl", hash = "sha256:50b1502b60e289cb37883f3dfd34532b8873c7de9f49bb546641ce9cbd256ebd"},
- {file = "rfc3986-2.0.0.tar.gz", hash = "sha256:97aacf9dbd4bfd829baad6e6309fa6573aaf1be3f6fa735c8ab05e46cecb261c"},
-]
-
-[package.extras]
-idna2008 = ["idna"]
-
-[[package]]
-name = "rich"
-version = "14.2.0"
-description = "Render rich text, tables, progress bars, syntax highlighting, markdown and more to the terminal"
-optional = false
-python-versions = ">=3.8.0"
-files = [
- {file = "rich-14.2.0-py3-none-any.whl", hash = "sha256:76bc51fe2e57d2b1be1f96c524b890b816e334ab4c1e45888799bfaab0021edd"},
- {file = "rich-14.2.0.tar.gz", hash = "sha256:73ff50c7c0c1c77c8243079283f4edb376f0f6442433aecb8ce7e6d0b92d1fe4"},
-]
-
-[package.dependencies]
-markdown-it-py = ">=2.2.0"
-pygments = ">=2.13.0,<3.0.0"
-
-[package.extras]
-jupyter = ["ipywidgets (>=7.5.1,<9)"]
-
-[[package]]
-name = "secretstorage"
-version = "3.5.0"
-description = "Python bindings to FreeDesktop.org Secret Service API"
-optional = false
-python-versions = ">=3.10"
-files = [
- {file = "secretstorage-3.5.0-py3-none-any.whl", hash = "sha256:0ce65888c0725fcb2c5bc0fdb8e5438eece02c523557ea40ce0703c266248137"},
- {file = "secretstorage-3.5.0.tar.gz", hash = "sha256:f04b8e4689cbce351744d5537bf6b1329c6fc68f91fa666f60a380edddcd11be"},
-]
-
-[package.dependencies]
-cryptography = ">=2.0"
-jeepney = ">=0.6"
-
[[package]]
name = "tomli"
version = "2.3.0"
@@ -1110,28 +684,6 @@ files = [
{file = "tomli-2.3.0.tar.gz", hash = "sha256:64be704a875d2a59753d80ee8a533c3fe183e3f06807ff7dc2232938ccb01549"},
]
-[[package]]
-name = "twine"
-version = "5.1.1"
-description = "Collection of utilities for publishing packages on PyPI"
-optional = false
-python-versions = ">=3.8"
-files = [
- {file = "twine-5.1.1-py3-none-any.whl", hash = "sha256:215dbe7b4b94c2c50a7315c0275d2258399280fbb7d04182c7e55e24b5f93997"},
- {file = "twine-5.1.1.tar.gz", hash = "sha256:9aa0825139c02b3434d913545c7b847a21c835e11597f5255842d457da2322db"},
-]
-
-[package.dependencies]
-importlib-metadata = ">=3.6"
-keyring = ">=15.1"
-pkginfo = ">=1.8.1,<1.11"
-readme-renderer = ">=35.0"
-requests = ">=2.20"
-requests-toolbelt = ">=0.8.0,<0.9.0 || >0.9.0"
-rfc3986 = ">=1.4.0"
-rich = ">=12.0.0"
-urllib3 = ">=1.26.0"
-
[[package]]
name = "typing-extensions"
version = "4.15.0"
@@ -1145,41 +697,22 @@ files = [
[[package]]
name = "urllib3"
-version = "2.5.0"
+version = "2.6.2"
description = "HTTP library with thread-safe connection pooling, file post, and more."
optional = false
python-versions = ">=3.9"
files = [
- {file = "urllib3-2.5.0-py3-none-any.whl", hash = "sha256:e6b01673c0fa6a13e374b50871808eb3bf7046c4b125b216f6bf1cc604cff0dc"},
- {file = "urllib3-2.5.0.tar.gz", hash = "sha256:3fc47733c7e419d4bc3f6b3dc2b4f890bb743906a30d56ba4a5bfa4bbff92760"},
+ {file = "urllib3-2.6.2-py3-none-any.whl", hash = "sha256:ec21cddfe7724fc7cb4ba4bea7aa8e2ef36f607a4bab81aa6ce42a13dc3f03dd"},
+ {file = "urllib3-2.6.2.tar.gz", hash = "sha256:016f9c98bb7e98085cb2b4b17b87d2c702975664e4f060c6532e64d1c1a5e797"},
]
[package.extras]
-brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)"]
+brotli = ["brotli (>=1.2.0)", "brotlicffi (>=1.2.0.0)"]
h2 = ["h2 (>=4,<5)"]
socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"]
-zstd = ["zstandard (>=0.18.0)"]
-
-[[package]]
-name = "zipp"
-version = "3.23.0"
-description = "Backport of pathlib-compatible object wrapper for zip files"
-optional = false
-python-versions = ">=3.9"
-files = [
- {file = "zipp-3.23.0-py3-none-any.whl", hash = "sha256:071652d6115ed432f5ce1d34c336c0adfd6a884660d1e9712a256d3d3bd4b14e"},
- {file = "zipp-3.23.0.tar.gz", hash = "sha256:a07157588a12518c9d4034df3fbbee09c814741a33ff63c05fa29d26a2404166"},
-]
-
-[package.extras]
-check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1)"]
-cover = ["pytest-cov"]
-doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"]
-enabler = ["pytest-enabler (>=2.2)"]
-test = ["big-O", "jaraco.functools", "jaraco.itertools", "jaraco.test", "more_itertools", "pytest (>=6,!=8.1.*)", "pytest-ignore-flaky"]
-type = ["pytest-mypy"]
+zstd = ["backports-zstd (>=1.0.0)"]
[metadata]
lock-version = "2.0"
python-versions = "^3.10"
-content-hash = "17e97a5516576384aafd227385b42be9178527537a52ab44e8797816534b5193"
+content-hash = "026c4acd254e889b70bb8c25ffb5e6323eee86380f54f2d8ef02f59ae9307529"
diff --git a/pyproject.toml b/pyproject.toml
index bd3db91..2d29795 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,13 +1,13 @@
[tool.poetry]
name = "jinjaturtle"
-version = "0.1.2"
+version = "0.3.3"
description = "Convert config files into Ansible defaults and Jinja2 templates."
authors = ["Miguel Jacq "]
license = "GPL-3.0-or-later"
readme = "README.md"
packages = [{ include = "jinjaturtle", from = "src" }]
-keywords = ["ansible", "jinja2", "config", "toml", "ini", "devops"]
+keywords = ["ansible", "jinja2", "config", "toml", "ini", "yaml", "json", "devops"]
homepage = "https://git.mig5.net/mig5/jinjaturtle"
repository = "https://git.mig5.net/mig5/jinjaturtle"
@@ -16,13 +16,8 @@ repository = "https://git.mig5.net/mig5/jinjaturtle"
python = "^3.10"
PyYAML = "^6.0"
tomli = { version = "^2.0.0", python = "<3.11" }
-
-[tool.poetry.group.dev.dependencies]
-pytest = "^7.0"
-pytest-cov = "^4.0"
-build = "^1.0"
-twine = "^5.0"
-pyproject-appimage = "^4.2"
+defusedxml = "^0.7.1"
+jinja2 = "^3.1.6"
[tool.poetry.scripts]
jinjaturtle = "jinjaturtle.cli:main"
@@ -34,3 +29,8 @@ build-backend = "poetry.core.masonry.api"
[tool.pyproject-appimage]
script = "jinjaturtle"
output = "JinjaTurtle.AppImage"
+
+[tool.poetry.dev-dependencies]
+pytest = "^8"
+pytest-cov = "^5"
+pyproject-appimage = "^4.2"
diff --git a/release.sh b/release.sh
index 7e8521c..8133992 100755
--- a/release.sh
+++ b/release.sh
@@ -2,7 +2,8 @@
set -eo pipefail
-rm -rf dist
+# Clean caches etc
+filedust -y .
# Publish to Pypi
poetry build
@@ -14,3 +15,30 @@ mv JinjaTurtle.AppImage dist/
# Sign packages
for file in `ls -1 dist/`; do qubes-gpg-client --batch --armor --detach-sign dist/$file > dist/$file.asc; done
+
+# Deb stuff
+DISTS=(
+ debian:bookworm
+ debian:trixie
+ ubuntu:jammy
+ ubuntu:noble
+)
+
+for dist in ${DISTS[@]}; do
+ release=$(echo ${dist} | cut -d: -f2)
+ mkdir -p dist/${release}
+
+ docker build -f Dockerfile.debbuild -t jinjaturtle-deb:${release} \
+ --no-cache \
+ --progress=plain \
+ --build-arg BASE_IMAGE=${dist} .
+
+ docker run --rm \
+ -e SUITE="${release}" \
+ -v "$PWD":/src \
+ -v "$PWD/dist/${release}":/out \
+ jinjaturtle-deb:${release}
+
+ debfile=$(ls -1 dist/${release}/*.deb)
+ reprepro -b /home/user/git/repo includedeb "${release}" "${debfile}"
+done
diff --git a/src/jinjaturtle/cli.py b/src/jinjaturtle/cli.py
index 5c59a87..c222e86 100644
--- a/src/jinjaturtle/cli.py
+++ b/src/jinjaturtle/cli.py
@@ -2,13 +2,15 @@ from __future__ import annotations
import argparse
import sys
+from defusedxml import defuse_stdlib
from pathlib import Path
from .core import (
parse_config,
+ analyze_loops,
flatten_config,
- generate_defaults_yaml,
- generate_template,
+ generate_ansible_yaml,
+ generate_jinja2_template,
)
@@ -19,7 +21,7 @@ def _build_arg_parser() -> argparse.ArgumentParser:
)
ap.add_argument(
"config",
- help="Path to the source configuration file (TOML or INI-style).",
+ help="Path to the source configuration file (TOML, YAML, JSON or INI-style).",
)
ap.add_argument(
"-r",
@@ -30,7 +32,7 @@ def _build_arg_parser() -> argparse.ArgumentParser:
ap.add_argument(
"-f",
"--format",
- choices=["ini", "toml"],
+ choices=["ini", "json", "toml", "yaml", "xml"],
help="Force config format instead of auto-detecting from filename.",
)
ap.add_argument(
@@ -47,23 +49,39 @@ def _build_arg_parser() -> argparse.ArgumentParser:
def _main(argv: list[str] | None = None) -> int:
+ defuse_stdlib()
parser = _build_arg_parser()
args = parser.parse_args(argv)
config_path = Path(args.config)
- fmt, parsed = parse_config(config_path, args.format)
- flat_items = flatten_config(fmt, parsed)
- defaults_yaml = generate_defaults_yaml(args.role_name, flat_items)
config_text = config_path.read_text(encoding="utf-8")
- template_str = generate_template(
- fmt, parsed, args.role_name, original_text=config_text
+
+ # Parse the config
+ fmt, parsed = parse_config(config_path, args.format)
+
+ # Analyze for loops
+ loop_candidates = analyze_loops(fmt, parsed)
+
+ # Flatten config (excluding loop paths if loops are detected)
+ flat_items = flatten_config(fmt, parsed, loop_candidates)
+
+ # Generate defaults YAML (with loop collections if detected)
+ ansible_yaml = generate_ansible_yaml(args.role_name, flat_items, loop_candidates)
+
+ # Generate template (with loops if detected)
+ template_str = generate_jinja2_template(
+ fmt,
+ parsed,
+ args.role_name,
+ original_text=config_text,
+ loop_candidates=loop_candidates,
)
if args.defaults_output:
- Path(args.defaults_output).write_text(defaults_yaml, encoding="utf-8")
+ Path(args.defaults_output).write_text(ansible_yaml, encoding="utf-8")
else:
print("# defaults/main.yml")
- print(defaults_yaml, end="")
+ print(ansible_yaml, end="")
if args.template_output:
Path(args.template_output).write_text(template_str, encoding="utf-8")
@@ -71,14 +89,11 @@ def _main(argv: list[str] | None = None) -> int:
print("# config.j2")
print(template_str, end="")
- return 0
+ return True
def main() -> None:
"""
Console-script entry point.
-
- Defined in pyproject.toml as:
- jinjaturtle = jinjaturtle.cli:main
"""
- raise SystemExit(_main(sys.argv[1:]))
+ _main(sys.argv[1:])
diff --git a/src/jinjaturtle/core.py b/src/jinjaturtle/core.py
index 03b159b..e4f3d13 100644
--- a/src/jinjaturtle/core.py
+++ b/src/jinjaturtle/core.py
@@ -1,33 +1,41 @@
from __future__ import annotations
-import configparser
-import json
from pathlib import Path
from typing import Any, Iterable
+
+import datetime
import yaml
-try:
- from ruamel.yaml import YAML as RuamelYAML # for comment-preserving YAML
-except ImportError: # pragma: no cover
- RuamelYAML = None
-
-try:
- import tomllib # Python 3.11+
-except ModuleNotFoundError: # pragma: no cover
- try:
- import tomli as tomllib # type: ignore
- except ModuleNotFoundError: # pragma: no cover
- tomllib = None # type: ignore
+from .loop_analyzer import LoopAnalyzer, LoopCandidate
+from .handlers import (
+ BaseHandler,
+ IniHandler,
+ JsonHandler,
+ TomlHandler,
+ YamlHandler,
+ XmlHandler,
+)
class QuotedString(str):
- """Marker type for strings that must be double-quoted in YAML output."""
+ """
+ Marker type for strings that must be double-quoted in YAML output.
+ """
pass
+def _fallback_str_representer(dumper: yaml.SafeDumper, data: Any):
+ """
+ Fallback for objects the dumper doesn't know about.
+ """
+ return dumper.represent_scalar("tag:yaml.org,2002:str", str(data))
+
+
class _TurtleDumper(yaml.SafeDumper):
- """Custom YAML dumper that always double-quotes QuotedString values."""
+ """
+ Custom YAML dumper that always double-quotes QuotedString values.
+ """
pass
@@ -37,11 +45,34 @@ def _quoted_str_representer(dumper: yaml.SafeDumper, data: QuotedString):
_TurtleDumper.add_representer(QuotedString, _quoted_str_representer)
+# Use our fallback for any unknown object types
+_TurtleDumper.add_representer(None, _fallback_str_representer)
+
+_HANDLERS: dict[str, BaseHandler] = {}
+
+_INI_HANDLER = IniHandler()
+_JSON_HANDLER = JsonHandler()
+_TOML_HANDLER = TomlHandler()
+_YAML_HANDLER = YamlHandler()
+_XML_HANDLER = XmlHandler()
+
+_HANDLERS["ini"] = _INI_HANDLER
+_HANDLERS["json"] = _JSON_HANDLER
+_HANDLERS["toml"] = _TOML_HANDLER
+_HANDLERS["yaml"] = _YAML_HANDLER
+_HANDLERS["xml"] = _XML_HANDLER
+
+
+def make_var_name(role_prefix: str, path: Iterable[str]) -> str:
+ """
+ Wrapper for :meth:`BaseHandler.make_var_name`.
+ """
+ return BaseHandler.make_var_name(role_prefix, path)
def detect_format(path: Path, explicit: str | None = None) -> str:
"""
- Determine config format (toml, yaml, ini-ish) from argument or filename.
+ Determine config format from argument or filename.
"""
if explicit:
return explicit
@@ -55,182 +86,102 @@ def detect_format(path: Path, explicit: str | None = None) -> str:
return "json"
if suffix in {".ini", ".cfg", ".conf"} or name.endswith(".ini"):
return "ini"
+ if suffix == ".xml":
+ return "xml"
# Fallback: treat as INI-ish
return "ini"
def parse_config(path: Path, fmt: str | None = None) -> tuple[str, Any]:
"""
- Parse config file into a Python object:
-
- TOML -> nested dict
- INI -> configparser.ConfigParser
+ Parse config file into a Python object.
"""
fmt = detect_format(path, fmt)
+ handler = _HANDLERS.get(fmt)
+ if handler is None:
+ raise ValueError(f"Unsupported config format: {fmt}")
+ parsed = handler.parse(path)
+ # Make sure datetime objects are treated as strings (TOML, YAML)
+ parsed = _stringify_timestamps(parsed)
- if fmt == "toml":
- if tomllib is None:
- raise RuntimeError(
- "tomllib/tomli is required to parse TOML files but is not installed"
- )
- with path.open("rb") as f:
- data = tomllib.load(f)
- return fmt, data
-
- if fmt == "yaml":
- text = path.read_text(encoding="utf-8")
- if RuamelYAML is not None:
- # ruamel.yaml preserves comments; we'll reuse them in template gen
- y = RuamelYAML()
- y.preserve_quotes = True
- data = y.load(text) or {}
- else:
- # Fallback: PyYAML (drops comments in parsed structure, but we still
- # have the original text for comment-preserving template generation).
- data = yaml.safe_load(text) or {}
- return fmt, data
-
- if fmt == "json":
- with path.open("r", encoding="utf-8") as f:
- data = json.load(f)
- return fmt, data
-
- if fmt == "ini":
- parser = configparser.ConfigParser()
- parser.optionxform = str # preserve key case
- with path.open("r", encoding="utf-8") as f:
- parser.read_file(f)
- return fmt, parser
-
- raise ValueError(f"Unsupported config format: {fmt}")
+ return fmt, parsed
-def flatten_config(fmt: str, parsed: Any) -> list[tuple[tuple[str, ...], Any]]:
+def analyze_loops(fmt: str, parsed: Any) -> list[LoopCandidate]:
"""
- Flatten parsed config into a list of (path_tuple, value).
-
- Examples:
- TOML: [server.tls] enabled = true
- -> (("server", "tls", "enabled"), True)
-
- INI: [somesection] foo = "bar"
- -> (("somesection", "foo"), "bar")
-
- For INI, values are processed as strings (quotes stripped when obvious).
+ Analyze parsed config to find loop opportunities.
"""
- items: list[tuple[tuple[str, ...], Any]] = []
+ analyzer = LoopAnalyzer()
+ candidates = analyzer.analyze(parsed, fmt)
- if fmt in {"toml", "yaml", "json"}:
+ # Filter by confidence threshold
+ return [c for c in candidates if c.confidence >= LoopAnalyzer.MIN_CONFIDENCE]
- def _walk(obj: Any, path: tuple[str, ...] = ()) -> None:
- if isinstance(obj, dict):
- for k, v in obj.items():
- _walk(v, path + (str(k),))
- elif isinstance(obj, list) and fmt in {"yaml", "json"}:
- # for YAML/JSON, flatten lists so each element can be templated;
- # TOML still treats list as a single scalar (ports = [..]) which is fine.
- for i, v in enumerate(obj):
- _walk(v, path + (str(i),))
- else:
- items.append((path, obj))
- _walk(parsed)
+def flatten_config(
+ fmt: str, parsed: Any, loop_candidates: list[LoopCandidate] | None = None
+) -> list[tuple[tuple[str, ...], Any]]:
+ """
+ Flatten parsed config into (path, value) pairs.
- elif fmt == "ini":
- parser: configparser.ConfigParser = parsed
- for section in parser.sections():
- for key, value in parser.items(section, raw=True):
- raw = value.strip()
- # Strip surrounding quotes from INI values for defaults
- if len(raw) >= 2 and raw[0] == raw[-1] and raw[0] in {'"', "'"}:
- processed: Any = raw[1:-1]
- else:
- processed = raw
- items.append(((section, key), processed))
- else: # pragma: no cover
+ If loop_candidates is provided, paths within those loops are excluded
+ from flattening (they'll be handled via loops in the template).
+ """
+ handler = _HANDLERS.get(fmt)
+ if handler is None:
raise ValueError(f"Unsupported format: {fmt}")
- return items
+ all_items = handler.flatten(parsed)
+
+ if not loop_candidates:
+ return all_items
+
+ # Build set of paths to exclude (anything under a loop path)
+ excluded_prefixes = {candidate.path for candidate in loop_candidates}
+
+ # Filter out items that fall under loop paths
+ filtered_items = []
+ for item_path, value in all_items:
+ # Check if this path starts with any loop path
+ is_excluded = False
+ for loop_path in excluded_prefixes:
+ if _path_starts_with(item_path, loop_path):
+ is_excluded = True
+ break
+
+ if not is_excluded:
+ filtered_items.append((item_path, value))
+
+ return filtered_items
-def make_var_name(role_prefix: str, path: Iterable[str]) -> str:
- """
- Build an Ansible var name like:
- role_prefix_section_subsection_key
-
- Sanitises parts to lowercase [a-z0-9_] and strips extras.
- """
- role_prefix = role_prefix.strip().lower()
- clean_parts: list[str] = []
-
- for part in path:
- part = str(part).strip()
- part = part.replace(" ", "_")
- cleaned_chars: list[str] = []
- for c in part:
- if c.isalnum() or c == "_":
- cleaned_chars.append(c.lower())
- else:
- cleaned_chars.append("_")
- cleaned_part = "".join(cleaned_chars).strip("_")
- if cleaned_part:
- clean_parts.append(cleaned_part)
-
- if clean_parts:
- return role_prefix + "_" + "_".join(clean_parts)
- return role_prefix
+def _path_starts_with(path: tuple[str, ...], prefix: tuple[str, ...]) -> bool:
+ """Check if path starts with prefix."""
+ if len(path) < len(prefix):
+ return False
+ return path[: len(prefix)] == prefix
-def _split_inline_comment(text: str, comment_chars: set[str]) -> tuple[str, str]:
- """
- Split 'value # comment' into (value_part, comment_part), where
- comment_part starts at the first unquoted comment character.
-
- comment_chars is e.g. {'#'} for TOML, {'#', ';'} for INI.
- """
- in_single = False
- in_double = False
- for i, ch in enumerate(text):
- if ch == "'" and not in_double:
- in_single = not in_single
- elif ch == '"' and not in_single:
- in_double = not in_double
- elif ch in comment_chars and not in_single and not in_double:
- return text[:i], text[i:]
- return text, ""
-
-
-def _normalize_default_value(value: Any) -> Any:
- """
- Ensure that 'true' / 'false' end up as quoted strings in YAML, not booleans.
-
- - bool -> QuotedString("true"/"false")
- - "true"/"false" (any case) -> QuotedString(original_text)
- - everything else -> unchanged
- """
- if isinstance(value, bool):
- # YAML booleans are lower-case; we keep them as strings.
- return QuotedString("true" if value else "false")
- if isinstance(value, str) and value.lower() in {"true", "false"}:
- return QuotedString(value)
- return value
-
-
-def generate_defaults_yaml(
+def generate_ansible_yaml(
role_prefix: str,
flat_items: list[tuple[tuple[str, ...], Any]],
+ loop_candidates: list[LoopCandidate] | None = None,
) -> str:
"""
- Create YAML for defaults/main.yml from flattened items.
-
- Boolean/boolean-like values ("true"/"false") are forced to be *strings*
- and double-quoted in the resulting YAML so that Ansible does not coerce
- them back into Python booleans.
+ Create Ansible YAML for defaults/main.yml.
"""
defaults: dict[str, Any] = {}
+
+ # Add scalar variables
for path, value in flat_items:
var_name = make_var_name(role_prefix, path)
- defaults[var_name] = _normalize_default_value(value)
+ defaults[var_name] = value # No normalization - keep original types
+
+ # Add loop collections
+ if loop_candidates:
+ for candidate in loop_candidates:
+ var_name = make_var_name(role_prefix, candidate.path)
+ defaults[var_name] = candidate.items
return yaml.dump(
defaults,
@@ -243,488 +194,54 @@ def generate_defaults_yaml(
)
-def _generate_toml_template(role_prefix: str, data: dict[str, Any]) -> str:
- """
- Generate a TOML Jinja2 template from parsed TOML dict.
-
- Values become Jinja placeholders, with quoting preserved for strings:
- foo = "bar" -> foo = "{{ prefix_foo }}"
- port = 8080 -> port = {{ prefix_port }}
- """
- lines: list[str] = []
-
- def emit_kv(path: tuple[str, ...], key: str, value: Any) -> None:
- var_name = make_var_name(role_prefix, path + (key,))
- if isinstance(value, str):
- lines.append(f'{key} = "{{{{ {var_name} }}}}"')
- else:
- lines.append(f"{key} = {{{{ {var_name} }}}}")
-
- def walk(obj: dict[str, Any], path: tuple[str, ...] = ()) -> None:
- scalar_items = {k: v for k, v in obj.items() if not isinstance(v, dict)}
- nested_items = {k: v for k, v in obj.items() if isinstance(v, dict)}
-
- if path:
- header = ".".join(path)
- lines.append(f"[{header}]")
-
- for key, val in scalar_items.items():
- emit_kv(path, str(key), val)
-
- if scalar_items:
- lines.append("")
-
- for key, val in nested_items.items():
- walk(val, path + (str(key),))
-
- # Root scalars (no table header)
- root_scalars = {k: v for k, v in data.items() if not isinstance(v, dict)}
- for key, val in root_scalars.items():
- emit_kv((), str(key), val)
- if root_scalars:
- lines.append("")
-
- # Tables
- for key, val in data.items():
- if isinstance(val, dict):
- walk(val, (str(key),))
-
- return "\n".join(lines).rstrip() + "\n"
-
-
-def _generate_ini_template(role_prefix: str, parser: configparser.ConfigParser) -> str:
- """
- Generate an INI-style Jinja2 template from a ConfigParser.
-
- Quoting heuristic:
- foo = "bar" -> foo = "{{ prefix_section_foo }}"
- num = 42 -> num = {{ prefix_section_num }}
- """
- lines: list[str] = []
-
- for section in parser.sections():
- lines.append(f"[{section}]")
- for key, value in parser.items(section, raw=True):
- path = (section, key)
- var_name = make_var_name(role_prefix, path)
- value = value.strip()
- if len(value) >= 2 and value[0] == value[-1] and value[0] in {'"', "'"}:
- lines.append(f'{key} = "{{{{ {var_name} }}}}"')
- else:
- lines.append(f"{key} = {{{{ {var_name} }}}}")
- lines.append("")
-
- return "\n".join(lines).rstrip() + "\n"
-
-
-def _generate_ini_template_from_text(role_prefix: str, text: str) -> str:
- """
- Generate a Jinja2 template for an INI/php.ini-style file, preserving
- comments, blank lines, and section headers by patching values in-place.
- """
- lines = text.splitlines(keepends=True)
- current_section: str | None = None
- out_lines: list[str] = []
-
- for raw_line in lines:
- line = raw_line
- stripped = line.lstrip()
-
- # Blank or pure comment: keep as-is
- if not stripped or stripped[0] in {"#", ";"}:
- out_lines.append(raw_line)
- continue
-
- # Section header
- if stripped.startswith("[") and "]" in stripped:
- header_inner = stripped[1 : stripped.index("]")]
- current_section = header_inner.strip()
- out_lines.append(raw_line)
- continue
-
- # Work without newline so we can re-attach it exactly
- newline = ""
- content = raw_line
- if content.endswith("\r\n"):
- newline = "\r\n"
- content = content[:-2]
- elif content.endswith("\n"):
- newline = content[-1]
- content = content[:-1]
-
- eq_index = content.find("=")
- if eq_index == -1:
- # Not a simple key=value line: leave untouched
- out_lines.append(raw_line)
- continue
-
- before_eq = content[:eq_index]
- after_eq = content[eq_index + 1 :]
-
- key = before_eq.strip()
- if not key:
- out_lines.append(raw_line)
- continue
-
- # Whitespace after '='
- value_ws_len = len(after_eq) - len(after_eq.lstrip(" \t"))
- leading_ws = after_eq[:value_ws_len]
- value_and_comment = after_eq[value_ws_len:]
-
- value_part, comment_part = _split_inline_comment(value_and_comment, {"#", ";"})
- raw_value = value_part.strip()
-
- path = (key,) if current_section is None else (current_section, key)
- var_name = make_var_name(role_prefix, path)
-
- # Was the original value quoted?
- use_quotes = (
- len(raw_value) >= 2
- and raw_value[0] == raw_value[-1]
- and raw_value[0] in {'"', "'"}
- )
-
- if use_quotes:
- quote_char = raw_value[0]
- replacement_value = f"{quote_char}{{{{ {var_name} }}}}{quote_char}"
- else:
- replacement_value = f"{{{{ {var_name} }}}}"
-
- new_content = before_eq + "=" + leading_ws + replacement_value + comment_part
- out_lines.append(new_content + newline)
-
- return "".join(out_lines)
-
-
-def _generate_toml_template_from_text(role_prefix: str, text: str) -> str:
- """
- Generate a Jinja2 template for a TOML file, preserving comments,
- blank lines, and table headers by patching values in-place.
-
- Handles inline tables like:
- temp_targets = { cpu = 79.5, case = 72.0 }
-
- by mapping them to:
- temp_targets = { cpu = {{ prefix_database_temp_targets_cpu }},
- case = {{ prefix_database_temp_targets_case }} }
- """
- lines = text.splitlines(keepends=True)
- current_table: tuple[str, ...] = ()
- out_lines: list[str] = []
-
- for raw_line in lines:
- line = raw_line
- stripped = line.lstrip()
-
- # Blank or pure comment
- if not stripped or stripped.startswith("#"):
- out_lines.append(raw_line)
- continue
-
- # Table header: [server] or [server.tls] or [[array.of.tables]]
- if stripped.startswith("[") and "]" in stripped:
- header = stripped
- first_bracket = header.find("[")
- closing_bracket = header.find("]", first_bracket + 1)
- if first_bracket != -1 and closing_bracket != -1:
- inner = header[first_bracket + 1 : closing_bracket].strip()
- inner = inner.strip("[]") # handle [[table]] as well
- parts = [p.strip() for p in inner.split(".") if p.strip()]
- current_table = tuple(parts)
- out_lines.append(raw_line)
- continue
-
- # Try key = value
- newline = ""
- content = raw_line
- if content.endswith("\r\n"):
- newline = "\r\n"
- content = content[:-2]
- elif content.endswith("\n"):
- newline = content[-1]
- content = content[:-1]
-
- eq_index = content.find("=")
- if eq_index == -1:
- out_lines.append(raw_line)
- continue
-
- before_eq = content[:eq_index]
- after_eq = content[eq_index + 1 :]
-
- key = before_eq.strip()
- if not key:
- out_lines.append(raw_line)
- continue
-
- # Whitespace after '='
- value_ws_len = len(after_eq) - len(after_eq.lstrip(" \t"))
- leading_ws = after_eq[:value_ws_len]
- value_and_comment = after_eq[value_ws_len:]
-
- value_part, comment_part = _split_inline_comment(value_and_comment, {"#"})
- raw_value = value_part.strip()
-
- # Path for this key (table + key)
- path = current_table + (key,)
-
- # Special case: inline table
- if (
- raw_value.startswith("{")
- and raw_value.endswith("}")
- and tomllib is not None
- ):
- try:
- # Parse the inline table as a tiny TOML document
- mini_source = "table = " + raw_value + "\n"
- mini_data = tomllib.loads(mini_source)["table"]
- except Exception:
- mini_data = None
-
- if isinstance(mini_data, dict):
- inner_bits: list[str] = []
- for sub_key, sub_val in mini_data.items():
- nested_path = path + (sub_key,)
- nested_var = make_var_name(role_prefix, nested_path)
- if isinstance(sub_val, str):
- inner_bits.append(f'{sub_key} = "{{{{ {nested_var} }}}}"')
- else:
- inner_bits.append(f"{sub_key} = {{{{ {nested_var} }}}}")
- replacement_value = "{ " + ", ".join(inner_bits) + " }"
- new_content = (
- before_eq + "=" + leading_ws + replacement_value + comment_part
- )
- out_lines.append(new_content + newline)
- continue
- # If parsing fails, fall through to normal handling
-
- # Normal scalar value handling (including bools, numbers, strings)
- var_name = make_var_name(role_prefix, path)
- use_quotes = (
- len(raw_value) >= 2
- and raw_value[0] == raw_value[-1]
- and raw_value[0] in {'"', "'"}
- )
-
- if use_quotes:
- quote_char = raw_value[0]
- replacement_value = f"{quote_char}{{{{ {var_name} }}}}{quote_char}"
- else:
- replacement_value = f"{{{{ {var_name} }}}}"
-
- new_content = before_eq + "=" + leading_ws + replacement_value + comment_part
- out_lines.append(new_content + newline)
-
- return "".join(out_lines)
-
-
-def _generate_yaml_template_from_text(
- role_prefix: str,
- text: str,
-) -> str:
- """
- Generate a Jinja2 template for a YAML file, preserving comments and
- blank lines by patching scalar values in-place.
-
- This handles common "config-ish" YAML:
- - top-level and nested mappings
- - lists of scalars
- - lists of small mapping objects
- It does *not* aim to support all YAML edge cases (anchors, tags, etc.).
- """
- lines = text.splitlines(keepends=True)
- out_lines: list[str] = []
-
- # Simple indentation-based context stack: (indent, path, kind)
- # kind is "map" or "seq".
- stack: list[tuple[int, tuple[str, ...], str]] = []
-
- # Track index per parent path for sequences
- seq_counters: dict[tuple[str, ...], int] = {}
-
- def current_path() -> tuple[str, ...]:
- return stack[-1][1] if stack else ()
-
- for raw_line in lines:
- stripped = raw_line.lstrip()
- indent = len(raw_line) - len(stripped)
-
- # Blank or pure comment lines unchanged
- if not stripped or stripped.startswith("#"):
- out_lines.append(raw_line)
- continue
-
- # Adjust stack based on indent
- while stack and indent < stack[-1][0]:
- stack.pop()
-
- # --- Handle mapping key lines: "key:" or "key: value"
- if ":" in stripped and not stripped.lstrip().startswith("- "):
- # separate key and rest
- key_part, rest = stripped.split(":", 1)
- key = key_part.strip()
- if not key:
- out_lines.append(raw_line)
- continue
-
- # Is this just "key:" or "key: value"?
- rest_stripped = rest.lstrip(" \t")
-
- # Use the same inline-comment splitter to see if there's any real value
- value_candidate, _ = _split_inline_comment(rest_stripped, {"#"})
- has_value = bool(value_candidate.strip())
-
- # Update stack/context: current mapping at this indent
- # Replace any existing mapping at same indent
- if stack and stack[-1][0] == indent and stack[-1][2] == "map":
- stack.pop()
- path = current_path() + (key,)
- stack.append((indent, path, "map"))
-
- if not has_value:
- # Just "key:" -> collection or nested structure begins on following lines.
- out_lines.append(raw_line)
- continue
-
- # We have an inline scalar value on this same line.
-
- # Separate value from inline comment
- value_part, comment_part = _split_inline_comment(rest_stripped, {"#"})
- raw_value = value_part.strip()
- var_name = make_var_name(role_prefix, path)
-
- # Keep quote-style if original was quoted
- use_quotes = (
- len(raw_value) >= 2
- and raw_value[0] == raw_value[-1]
- and raw_value[0] in {'"', "'"}
- )
-
- if use_quotes:
- q = raw_value[0]
- replacement = f"{q}{{{{ {var_name} }}}}{q}"
- else:
- replacement = f"{{{{ {var_name} }}}}"
-
- leading = rest[: len(rest) - len(rest.lstrip(" \t"))]
- new_stripped = f"{key}: {leading}{replacement}{comment_part}"
- out_lines.append(
- " " * indent + new_stripped + ("\n" if raw_line.endswith("\n") else "")
- )
- continue
-
- # --- Handle list items: "- value" or "- key: value"
- if stripped.startswith("- "):
- # Determine parent path
- # If top of stack isn't sequence at this indent, push one using current path
- if not stack or stack[-1][0] != indent or stack[-1][2] != "seq":
- parent_path = current_path()
- stack.append((indent, parent_path, "seq"))
-
- parent_path = stack[-1][1]
- content = stripped[2:] # after "- "
- parent_path = stack[-1][1]
- content = stripped[2:] # after "- "
-
- # Determine index for this parent path
- index = seq_counters.get(parent_path, 0)
- seq_counters[parent_path] = index + 1
-
- path = parent_path + (str(index),)
-
- value_part, comment_part = _split_inline_comment(content, {"#"})
- raw_value = value_part.strip()
- var_name = make_var_name(role_prefix, path)
-
- # If it's of the form "key: value" inside the list, we could try to
- # support that, but a simple scalar is the common case:
- use_quotes = (
- len(raw_value) >= 2
- and raw_value[0] == raw_value[-1]
- and raw_value[0] in {'"', "'"}
- )
-
- if use_quotes:
- q = raw_value[0]
- replacement = f"{q}{{{{ {var_name} }}}}{q}"
- else:
- replacement = f"{{{{ {var_name} }}}}"
-
- new_stripped = f"- {replacement}{comment_part}"
- out_lines.append(
- " " * indent + new_stripped + ("\n" if raw_line.endswith("\n") else "")
- )
- continue
-
- # Anything else (multi-line scalars, weird YAML): leave untouched
- out_lines.append(raw_line)
-
- return "".join(out_lines)
-
-
-def _generate_json_template(role_prefix: str, data: Any) -> str:
- """
- Generate a JSON Jinja2 template from parsed JSON data.
-
- All scalar values are replaced with Jinja expressions whose names are
- derived from the path, similar to TOML/YAML.
- """
-
- def _walk(obj: Any, path: tuple[str, ...] = ()) -> Any:
- if isinstance(obj, dict):
- return {k: _walk(v, path + (str(k),)) for k, v in obj.items()}
- if isinstance(obj, list):
- return [_walk(v, path + (str(i),)) for i, v in enumerate(obj)]
- # scalar
- var_name = make_var_name(role_prefix, path)
- return f"{{{{ {var_name} }}}}"
-
- templated = _walk(data)
- return json.dumps(templated, indent=2, ensure_ascii=False) + "\n"
-
-
-def generate_template(
+def generate_jinja2_template(
fmt: str,
parsed: Any,
role_prefix: str,
original_text: str | None = None,
+ loop_candidates: list[LoopCandidate] | None = None,
) -> str:
"""
Generate a Jinja2 template for the config.
-
- If original_text is provided, comments and blank lines are preserved by
- patching values in-place. Otherwise we fall back to reconstructing from
- the parsed structure (no comments). JSON of course does not support
- comments.
"""
- if original_text is not None:
- if fmt == "toml":
- return _generate_toml_template_from_text(role_prefix, original_text)
- if fmt == "ini":
- return _generate_ini_template_from_text(role_prefix, original_text)
- if fmt == "yaml":
- return _generate_yaml_template_from_text(role_prefix, original_text)
- # For JSON we ignore original_text and reconstruct from parsed structure below
- if fmt != "json":
- raise ValueError(f"Unsupported format: {fmt}")
+ handler = _HANDLERS.get(fmt)
- # Fallback: previous behaviour (no comments preserved)
- if fmt == "toml":
- if not isinstance(parsed, dict):
- raise TypeError("TOML parser result must be a dict")
- return _generate_toml_template(role_prefix, parsed)
- if fmt == "ini":
- if not isinstance(parsed, configparser.ConfigParser):
- raise TypeError("INI parser result must be a ConfigParser")
- return _generate_ini_template(role_prefix, parsed)
- if fmt == "yaml":
- if not isinstance(parsed, (dict, list)):
- raise TypeError("YAML parser result must be a dict or list")
- return _generate_yaml_template_from_text(
- role_prefix, yaml.safe_dump(parsed, sort_keys=False)
+ if handler is None:
+ raise ValueError(f"Unsupported format: {fmt}")
+
+ # Check if handler supports loop-aware generation
+ if hasattr(handler, "generate_jinja2_template_with_loops") and loop_candidates:
+ return handler.generate_jinja2_template_with_loops(
+ parsed, role_prefix, original_text, loop_candidates
)
- if fmt == "json":
- if not isinstance(parsed, (dict, list)):
- raise TypeError("JSON parser result must be a dict or list")
- return _generate_json_template(role_prefix, parsed)
- raise ValueError(f"Unsupported format: {fmt}")
+
+ # Fallback to original scalar-only generation
+ return handler.generate_jinja2_template(
+ parsed, role_prefix, original_text=original_text
+ )
+
+
+def _stringify_timestamps(obj: Any) -> Any:
+ """
+ Recursively walk a parsed config and turn any datetime/date/time objects
+ into plain strings in ISO-8601 form.
+
+ This prevents Python datetime objects from leaking into YAML/Jinja, which
+ would otherwise reformat the value (e.g. replacing 'T' with a space).
+
+    This otherwise commonly occurs with TOML and YAML files, where Python
+    automatically converts those sorts of strings into datetime objects.
+ """
+ if isinstance(obj, dict):
+ return {k: _stringify_timestamps(v) for k, v in obj.items()}
+ if isinstance(obj, list):
+ return [_stringify_timestamps(v) for v in obj]
+
+ # TOML & YAML both use the standard datetime types
+ if isinstance(obj, datetime.datetime):
+ # Use default ISO-8601: 'YYYY-MM-DDTHH:MM:SS±HH:MM' (with 'T')
+ return obj.isoformat()
+ if isinstance(obj, (datetime.date, datetime.time)):
+ return obj.isoformat()
+
+ return obj
diff --git a/src/jinjaturtle/handlers/__init__.py b/src/jinjaturtle/handlers/__init__.py
new file mode 100644
index 0000000..6bbcba1
--- /dev/null
+++ b/src/jinjaturtle/handlers/__init__.py
@@ -0,0 +1,19 @@
+from __future__ import annotations
+
+from .base import BaseHandler
+from .dict import DictLikeHandler
+from .ini import IniHandler
+from .json import JsonHandler
+from .toml import TomlHandler
+from .yaml import YamlHandler
+from .xml import XmlHandler
+
+__all__ = [
+ "BaseHandler",
+ "DictLikeHandler",
+ "IniHandler",
+ "JsonHandler",
+ "TomlHandler",
+ "YamlHandler",
+ "XmlHandler",
+]
diff --git a/src/jinjaturtle/handlers/base.py b/src/jinjaturtle/handlers/base.py
new file mode 100644
index 0000000..14aaec7
--- /dev/null
+++ b/src/jinjaturtle/handlers/base.py
@@ -0,0 +1,79 @@
+from __future__ import annotations
+
+from pathlib import Path
+from typing import Any, Iterable
+
+
+class BaseHandler:
+ """
+ Base class for a config format handler.
+
+ Each handler is responsible for:
+ - parse(path) -> parsed object
+ - flatten(parsed) -> list[(path_tuple, value)]
+ - generate_jinja2_template(parsed, role_prefix, original_text=None) -> str
+ """
+
+ fmt: str # e.g. "ini", "yaml", ...
+
+ def parse(self, path: Path) -> Any:
+ raise NotImplementedError
+
+ def flatten(self, parsed: Any) -> list[tuple[tuple[str, ...], Any]]:
+ raise NotImplementedError
+
+ def generate_jinja2_template(
+ self,
+ parsed: Any,
+ role_prefix: str,
+ original_text: str | None = None,
+ ) -> str:
+ raise NotImplementedError
+
+ def _split_inline_comment(
+ self, text: str, comment_chars: set[str]
+ ) -> tuple[str, str]:
+ """
+ Split 'value # comment' into (value_part, comment_part), where
+ comment_part starts at the first unquoted comment character.
+
+ comment_chars is e.g. {'#'} for TOML/YAML, {'#', ';'} for INI.
+ """
+ in_single = False
+ in_double = False
+ for i, ch in enumerate(text):
+ if ch == "'" and not in_double:
+ in_single = not in_single
+ elif ch == '"' and not in_single:
+ in_double = not in_double
+ elif ch in comment_chars and not in_single and not in_double:
+ return text[:i], text[i:]
+ return text, ""
+
+ @staticmethod
+ def make_var_name(role_prefix: str, path: Iterable[str]) -> str:
+ """
+ Build an Ansible var name like:
+ role_prefix_section_subsection_key
+
+ Sanitises parts to lowercase [a-z0-9_] and strips extras.
+ """
+ role_prefix = role_prefix.strip().lower()
+ clean_parts: list[str] = []
+
+ for part in path:
+ part = str(part).strip()
+ part = part.replace(" ", "_")
+ cleaned_chars: list[str] = []
+ for c in part:
+ if c.isalnum() or c == "_":
+ cleaned_chars.append(c.lower())
+ else:
+ cleaned_chars.append("_")
+ cleaned_part = "".join(cleaned_chars).strip("_")
+ if cleaned_part:
+ clean_parts.append(cleaned_part)
+
+ if clean_parts:
+ return role_prefix + "_" + "_".join(clean_parts)
+ return role_prefix
diff --git a/src/jinjaturtle/handlers/dict.py b/src/jinjaturtle/handlers/dict.py
new file mode 100644
index 0000000..eb8d926
--- /dev/null
+++ b/src/jinjaturtle/handlers/dict.py
@@ -0,0 +1,31 @@
+from __future__ import annotations
+
+from typing import Any
+
+from . import BaseHandler
+
+
+class DictLikeHandler(BaseHandler):
+ """
+ Base for TOML/YAML/JSON: nested dict/list structures.
+
+ Subclasses control whether lists are flattened.
+ """
+
+ flatten_lists: bool = False # override in subclasses
+
+ def flatten(self, parsed: Any) -> list[tuple[tuple[str, ...], Any]]:
+ items: list[tuple[tuple[str, ...], Any]] = []
+
+ def _walk(obj: Any, path: tuple[str, ...] = ()) -> None:
+ if isinstance(obj, dict):
+ for k, v in obj.items():
+ _walk(v, path + (str(k),))
+ elif isinstance(obj, list) and self.flatten_lists:
+ for i, v in enumerate(obj):
+ _walk(v, path + (str(i),))
+ else:
+ items.append((path, obj))
+
+ _walk(parsed)
+ return items
diff --git a/src/jinjaturtle/handlers/ini.py b/src/jinjaturtle/handlers/ini.py
new file mode 100644
index 0000000..ad92b72
--- /dev/null
+++ b/src/jinjaturtle/handlers/ini.py
@@ -0,0 +1,153 @@
+from __future__ import annotations
+
+import configparser
+from pathlib import Path
+from typing import Any
+
+from . import BaseHandler
+
+
+class IniHandler(BaseHandler):
+ fmt = "ini"
+
+ def parse(self, path: Path) -> configparser.ConfigParser:
+ parser = configparser.ConfigParser()
+ parser.optionxform = str # noqa
+ with path.open("r", encoding="utf-8") as f:
+ parser.read_file(f)
+ return parser
+
+ def flatten(self, parsed: Any) -> list[tuple[tuple[str, ...], Any]]:
+ if not isinstance(parsed, configparser.ConfigParser):
+ raise TypeError("INI parser result must be a ConfigParser")
+ parser: configparser.ConfigParser = parsed
+ items: list[tuple[tuple[str, ...], Any]] = []
+ for section in parser.sections():
+ for key, value in parser.items(section, raw=True):
+ raw = value.strip()
+ if len(raw) >= 2 and raw[0] == raw[-1] and raw[0] in {'"', "'"}:
+ processed: Any = raw[1:-1]
+ else:
+ processed = raw
+ items.append(((section, key), processed))
+ return items
+
+ def generate_jinja2_template(
+ self,
+ parsed: Any,
+ role_prefix: str,
+ original_text: str | None = None,
+ ) -> str:
+ if original_text is not None:
+ return self._generate_ini_template_from_text(role_prefix, original_text)
+ if not isinstance(parsed, configparser.ConfigParser):
+ raise TypeError("INI parser result must be a ConfigParser")
+ return self._generate_ini_template(role_prefix, parsed)
+
+ def _generate_ini_template(
+ self, role_prefix: str, parser: configparser.ConfigParser
+ ) -> str:
+ """
+ Generate an INI-style Jinja2 template from a ConfigParser.
+
+ Quoting heuristic:
+ foo = "bar" -> foo = "{{ prefix_section_foo }}"
+ num = 42 -> num = {{ prefix_section_num }}
+ """
+ lines: list[str] = []
+
+ for section in parser.sections():
+ lines.append(f"[{section}]")
+ for key, value in parser.items(section, raw=True):
+ path = (section, key)
+ var_name = self.make_var_name(role_prefix, path)
+ value = value.strip()
+ if len(value) >= 2 and value[0] == value[-1] and value[0] in {'"', "'"}:
+ lines.append(f'{key} = "{{{{ {var_name} }}}}"')
+ else:
+ lines.append(f"{key} = {{{{ {var_name} }}}}")
+ lines.append("")
+
+ return "\n".join(lines).rstrip() + "\n"
+
+ def _generate_ini_template_from_text(self, role_prefix: str, text: str) -> str:
+ """
+ Generate a Jinja2 template for an INI/php.ini-style file, preserving
+ comments, blank lines, and section headers by patching values in-place.
+ """
+ lines = text.splitlines(keepends=True)
+ current_section: str | None = None
+ out_lines: list[str] = []
+
+ for raw_line in lines:
+ line = raw_line
+ stripped = line.lstrip()
+
+ # Blank or pure comment: keep as-is
+ if not stripped or stripped[0] in {"#", ";"}:
+ out_lines.append(raw_line)
+ continue
+
+ # Section header
+ if stripped.startswith("[") and "]" in stripped:
+ header_inner = stripped[1 : stripped.index("]")]
+ current_section = header_inner.strip()
+ out_lines.append(raw_line)
+ continue
+
+ # Work without newline so we can re-attach it exactly
+ newline = ""
+ content = raw_line
+ if content.endswith("\r\n"):
+ newline = "\r\n"
+ content = content[:-2]
+ elif content.endswith("\n"):
+ newline = content[-1]
+ content = content[:-1]
+
+ eq_index = content.find("=")
+ if eq_index == -1:
+ # Not a simple key=value line: leave untouched
+ out_lines.append(raw_line)
+ continue
+
+ before_eq = content[:eq_index]
+ after_eq = content[eq_index + 1 :]
+
+ key = before_eq.strip()
+ if not key:
+ out_lines.append(raw_line)
+ continue
+
+ # Whitespace after '='
+ value_ws_len = len(after_eq) - len(after_eq.lstrip(" \t"))
+ leading_ws = after_eq[:value_ws_len]
+ value_and_comment = after_eq[value_ws_len:]
+
+ value_part, comment_part = self._split_inline_comment(
+ value_and_comment, {"#", ";"}
+ )
+ raw_value = value_part.strip()
+
+ path = (key,) if current_section is None else (current_section, key)
+ var_name = self.make_var_name(role_prefix, path)
+
+ # Was the original value quoted?
+ use_quotes = (
+ len(raw_value) >= 2
+ and raw_value[0] == raw_value[-1]
+ and raw_value[0] in {'"', "'"}
+ )
+
+ if use_quotes:
+ quote_char = raw_value[0]
+ replacement_value = f"{quote_char}{{{{ {var_name} }}}}{quote_char}"
+ else:
+ replacement_value = f"{{{{ {var_name} }}}}"
+
+ new_content = (
+ before_eq + "=" + leading_ws + replacement_value + comment_part
+ )
+ out_lines.append(new_content + newline)
+
+ return "".join(out_lines)
diff --git a/src/jinjaturtle/handlers/json.py b/src/jinjaturtle/handlers/json.py
new file mode 100644
index 0000000..035efdc
--- /dev/null
+++ b/src/jinjaturtle/handlers/json.py
@@ -0,0 +1,191 @@
+from __future__ import annotations
+
+import json
+import re
+from pathlib import Path
+from typing import Any
+
+from . import DictLikeHandler
+from ..loop_analyzer import LoopCandidate
+
+
+class JsonHandler(DictLikeHandler):
+ fmt = "json"
+ flatten_lists = True
+
+ def parse(self, path: Path) -> Any:
+ with path.open("r", encoding="utf-8") as f:
+ return json.load(f)
+
+ def generate_jinja2_template(
+ self,
+ parsed: Any,
+ role_prefix: str,
+ original_text: str | None = None,
+ ) -> str:
+ """Original scalar-only template generation."""
+ if not isinstance(parsed, (dict, list)):
+ raise TypeError("JSON parser result must be a dict or list")
+ # As before: ignore original_text and rebuild structurally
+ return self._generate_json_template(role_prefix, parsed)
+
+ def generate_jinja2_template_with_loops(
+ self,
+ parsed: Any,
+ role_prefix: str,
+ original_text: str | None,
+ loop_candidates: list[LoopCandidate],
+ ) -> str:
+ """Generate template with Jinja2 for loops where appropriate."""
+ if not isinstance(parsed, (dict, list)):
+ raise TypeError("JSON parser result must be a dict or list")
+
+ # Build loop path set for quick lookup
+ loop_paths = {candidate.path for candidate in loop_candidates}
+
+ return self._generate_json_template_with_loops(
+ role_prefix, parsed, loop_paths, loop_candidates
+ )
+
+ def _generate_json_template(self, role_prefix: str, data: Any) -> str:
+ """
+ Generate a JSON Jinja2 template from parsed JSON data.
+
+ All scalar values are replaced with Jinja expressions whose names are
+ derived from the path, similar to TOML/YAML.
+
+ Uses | tojson filter to preserve types (numbers, booleans, null).
+ """
+
+ def _walk(obj: Any, path: tuple[str, ...] = ()) -> Any:
+ if isinstance(obj, dict):
+ return {k: _walk(v, path + (str(k),)) for k, v in obj.items()}
+ if isinstance(obj, list):
+ return [_walk(v, path + (str(i),)) for i, v in enumerate(obj)]
+ # scalar - use marker that will be replaced with tojson
+ var_name = self.make_var_name(role_prefix, path)
+ return f"__SCALAR__{var_name}__"
+
+ templated = _walk(data)
+ json_str = json.dumps(templated, indent=2, ensure_ascii=False)
+
+ # Replace scalar markers with Jinja expressions using tojson filter
+ # This preserves types (numbers stay numbers, booleans stay booleans)
+ json_str = re.sub(
+ r'"__SCALAR__([a-zA-Z_][a-zA-Z0-9_]*)__"', r"{{ \1 | tojson }}", json_str
+ )
+
+ return json_str + "\n"
+
+ def _generate_json_template_with_loops(
+ self,
+ role_prefix: str,
+ data: Any,
+ loop_paths: set[tuple[str, ...]],
+ loop_candidates: list[LoopCandidate],
+ path: tuple[str, ...] = (),
+ ) -> str:
+ """
+ Generate a JSON Jinja2 template with for loops where appropriate.
+ """
+
+ def _walk(obj: Any, current_path: tuple[str, ...] = ()) -> Any:
+ # Check if this path is a loop candidate
+ if current_path in loop_paths:
+ # Find the matching candidate
+ candidate = next(c for c in loop_candidates if c.path == current_path)
+ collection_var = self.make_var_name(role_prefix, candidate.path)
+ item_var = candidate.loop_var
+
+ if candidate.item_schema == "scalar":
+ # Simple list of scalars - use special marker that we'll replace
+ return f"__LOOP_SCALAR__{collection_var}__{item_var}__"
+ elif candidate.item_schema in ("simple_dict", "nested"):
+ # List of dicts - use special marker
+ return f"__LOOP_DICT__{collection_var}__{item_var}__"
+
+ if isinstance(obj, dict):
+ return {k: _walk(v, current_path + (str(k),)) for k, v in obj.items()}
+ if isinstance(obj, list):
+            # Note: current_path cannot be in loop_paths here — loop-candidate
+            # paths are intercepted by the check at the top of _walk and never
+            # reach this branch. Walk the list elements normally, extending
+            # the path with each element's index.
+ return [_walk(v, current_path + (str(i),)) for i, v in enumerate(obj)]
+
+ # scalar - use marker to preserve type
+ var_name = self.make_var_name(role_prefix, current_path)
+ return f"__SCALAR__{var_name}__"
+
+ templated = _walk(data, path)
+
+ # Convert to JSON string
+ json_str = json.dumps(templated, indent=2, ensure_ascii=False)
+
+ # Replace scalar markers with Jinja expressions using tojson filter
+ json_str = re.sub(
+ r'"__SCALAR__([a-zA-Z_][a-zA-Z0-9_]*)__"', r"{{ \1 | tojson }}", json_str
+ )
+
+ # Post-process to replace loop markers with actual Jinja loops
+ for candidate in loop_candidates:
+ collection_var = self.make_var_name(role_prefix, candidate.path)
+ item_var = candidate.loop_var
+
+ if candidate.item_schema == "scalar":
+ # Replace scalar loop marker with Jinja for loop
+ marker = f'"__LOOP_SCALAR__{collection_var}__{item_var}__"'
+ replacement = self._generate_json_scalar_loop(
+ collection_var, item_var, candidate
+ )
+ json_str = json_str.replace(marker, replacement)
+
+ elif candidate.item_schema in ("simple_dict", "nested"):
+ # Replace dict loop marker with Jinja for loop
+ marker = f'"__LOOP_DICT__{collection_var}__{item_var}__"'
+ replacement = self._generate_json_dict_loop(
+ collection_var, item_var, candidate
+ )
+ json_str = json_str.replace(marker, replacement)
+
+ return json_str + "\n"
+
+ def _generate_json_scalar_loop(
+ self, collection_var: str, item_var: str, candidate: LoopCandidate
+ ) -> str:
+ """Generate a Jinja for loop for a scalar list in JSON."""
+ # Use tojson filter to properly handle strings (quotes them) and other types
+ # Include array brackets around the loop
+ return (
+ f"[{{% for {item_var} in {collection_var} %}}"
+ f"{{{{ {item_var} | tojson }}}}"
+ f"{{% if not loop.last %}}, {{% endif %}}"
+ f"{{% endfor %}}]"
+ )
+
+ def _generate_json_dict_loop(
+ self, collection_var: str, item_var: str, candidate: LoopCandidate
+ ) -> str:
+ """Generate a Jinja for loop for a dict list in JSON."""
+ if not candidate.items:
+ return "[]"
+
+ # Get first item as template
+ sample_item = candidate.items[0]
+
+ # Build the dict template - use tojson for all values to handle types correctly
+ fields = []
+ for key, value in sample_item.items():
+ if key == "_key":
+ continue
+ # Use tojson filter to properly serialize all types (strings, numbers, booleans)
+ fields.append(f'"{key}": {{{{ {item_var}.{key} | tojson }}}}')
+
+ dict_template = "{" + ", ".join(fields) + "}"
+
+        # Brackets mirror _generate_json_scalar_loop so output stays valid JSON.
+        return (
+            f"[{{% for {item_var} in {collection_var} %}}"
+            f"{dict_template}"
+            f"{{% if not loop.last %}}, {{% endif %}}{{% endfor %}}]"
+        )
diff --git a/src/jinjaturtle/handlers/toml.py b/src/jinjaturtle/handlers/toml.py
new file mode 100644
index 0000000..fe071bd
--- /dev/null
+++ b/src/jinjaturtle/handlers/toml.py
@@ -0,0 +1,547 @@
from __future__ import annotations

from pathlib import Path
from typing import Any

from . import DictLikeHandler
from ..loop_analyzer import LoopCandidate

# Prefer the stdlib tomllib (Python 3.11+), then the third-party "tomli"
# backport.  Fall back to None instead of letting a missing backport break
# the module import: parse() checks for None and raises a clear
# RuntimeError at the point of use (the original fallback could itself
# raise ImportError, making that guard dead code).
try:
    import tomllib
except ImportError:
    try:
        import tomli as tomllib
    except ImportError:
        tomllib = None
+
+
class TomlHandler(DictLikeHandler):
    """Handler for TOML config files, built on the dict-like flatten logic."""

    fmt = "toml"  # format identifier used for dispatch
    flatten_lists = False  # keep lists as scalars instead of flattening them
+
+ def parse(self, path: Path) -> Any:
+ if tomllib is None:
+ raise RuntimeError(
+ "tomllib/tomli is required to parse TOML files but is not installed"
+ )
+ with path.open("rb") as f:
+ return tomllib.load(f)
+
def generate_jinja2_template(
    self,
    parsed: Any,
    role_prefix: str,
    original_text: str | None = None,
) -> str:
    """Scalar-only template generation (no Jinja2 for-loops).

    When the original file text is available it is patched in place so
    comments and layout survive; otherwise the document is regenerated
    from the parsed dict.

    Raises:
        TypeError: if ``parsed`` is not a dict when regeneration is needed.
    """
    if original_text is None:
        if not isinstance(parsed, dict):
            raise TypeError("TOML parser result must be a dict")
        return self._generate_toml_template(role_prefix, parsed)
    return self._generate_toml_template_from_text(role_prefix, original_text)
+
def generate_jinja2_template_with_loops(
    self,
    parsed: Any,
    role_prefix: str,
    original_text: str | None,
    loop_candidates: list[LoopCandidate],
) -> str:
    """Generate a template, emitting Jinja2 for-loops for loop candidates.

    Prefers in-place patching of the original text (preserves comments
    and layout); falls back to regenerating from the parsed dict.

    Raises:
        TypeError: if ``parsed`` is not a dict when regeneration is needed.
    """
    if original_text is None:
        if not isinstance(parsed, dict):
            raise TypeError("TOML parser result must be a dict")
        return self._generate_toml_template_with_loops(
            role_prefix, parsed, loop_candidates
        )
    return self._generate_toml_template_with_loops_from_text(
        role_prefix, original_text, loop_candidates
    )
+
+ def _generate_toml_template(self, role_prefix: str, data: dict[str, Any]) -> str:
+ """
+ Generate a TOML Jinja2 template from parsed TOML dict.
+
+ Values become Jinja placeholders, with quoting preserved for strings:
+ foo = "bar" -> foo = "{{ prefix_foo }}"
+ port = 8080 -> port = {{ prefix_port }}
+ """
+ lines: list[str] = []
+
+ def emit_kv(path: tuple[str, ...], key: str, value: Any) -> None:
+ var_name = self.make_var_name(role_prefix, path + (key,))
+ if isinstance(value, str):
+ lines.append(f'{key} = "{{{{ {var_name} }}}}"')
+ elif isinstance(value, bool):
+ # Booleans need | lower filter (Python True/False ā TOML true/false)
+ lines.append(f"{key} = {{{{ {var_name} | lower }}}}")
+ else:
+ lines.append(f"{key} = {{{{ {var_name} }}}}")
+
+ def walk(obj: dict[str, Any], path: tuple[str, ...] = ()) -> None:
+ scalar_items = {k: v for k, v in obj.items() if not isinstance(v, dict)}
+ nested_items = {k: v for k, v in obj.items() if isinstance(v, dict)}
+
+ if path:
+ header = ".".join(path)
+ lines.append(f"[{header}]")
+
+ for key, val in scalar_items.items():
+ emit_kv(path, str(key), val)
+
+ if scalar_items:
+ lines.append("")
+
+ for key, val in nested_items.items():
+ walk(val, path + (str(key),))
+
+ # Root scalars (no table header)
+ root_scalars = {k: v for k, v in data.items() if not isinstance(v, dict)}
+ for key, val in root_scalars.items():
+ emit_kv((), str(key), val)
+ if root_scalars:
+ lines.append("")
+
+ # Tables
+ for key, val in data.items():
+ if isinstance(val, dict):
+ walk(val, (str(key),))
+
+ return "\n".join(lines).rstrip() + "\n"
+
def _generate_toml_template_with_loops(
    self,
    role_prefix: str,
    data: dict[str, Any],
    loop_candidates: list[LoopCandidate],
) -> str:
    """
    Generate a TOML Jinja2 template with for loops where appropriate.

    Works like ``_generate_toml_template``, except that list values whose
    path matches a loop candidate are rendered as an inline Jinja2 for
    loop (scalar lists) or a ``| tojson`` dump (dict lists, since TOML
    arrays-of-tables are hard to template inline).
    """
    lines: list[str] = []
    # Set of flattened paths that should be rendered as loops.
    loop_paths = {candidate.path for candidate in loop_candidates}

    def emit_kv(path: tuple[str, ...], key: str, value: Any) -> None:
        var_name = self.make_var_name(role_prefix, path + (key,))
        if isinstance(value, str):
            lines.append(f'{key} = "{{{{ {var_name} }}}}"')
        elif isinstance(value, bool):
            # Booleans need | lower filter (Python True/False -> TOML true/false)
            lines.append(f"{key} = {{{{ {var_name} | lower }}}}")
        elif isinstance(value, list):
            # Check if this list is a loop candidate
            if path + (key,) in loop_paths:
                # Find the matching candidate
                candidate = next(
                    c for c in loop_candidates if c.path == path + (key,)
                )
                collection_var = self.make_var_name(role_prefix, candidate.path)
                item_var = candidate.loop_var

                if candidate.item_schema == "scalar":
                    # Scalar list loop.
                    # NOTE(review): items render unquoted; string items would
                    # need quoting/tojson to stay valid TOML - confirm.
                    lines.append(
                        f"{key} = ["
                        f"{{% for {item_var} in {collection_var} %}}"
                        f"{{{{ {item_var} }}}}"
                        f"{{% if not loop.last %}}, {{% endif %}}"
                        f"{{% endfor %}}"
                        f"]"
                    )
                elif candidate.item_schema in ("simple_dict", "nested"):
                    # Dict list loop - TOML array of tables is complex to
                    # template inline, so dump the whole collection as JSON.
                    lines.append(f"{key} = {{{{ {var_name} | tojson }}}}")
                else:
                    # Unknown schema: not a loop, treat as regular variable
                    lines.append(f"{key} = {{{{ {var_name} }}}}")
            else:
                # List that was not promoted to a loop: single variable.
                lines.append(f"{key} = {{{{ {var_name} }}}}")

    def walk(obj: dict[str, Any], path: tuple[str, ...] = ()) -> None:
        # Emit key/value pairs before nested tables so they stay attached
        # to the correct [table] header.
        scalar_items = {k: v for k, v in obj.items() if not isinstance(v, dict)}
        nested_items = {k: v for k, v in obj.items() if isinstance(v, dict)}

        if path:
            header = ".".join(path)
            lines.append(f"[{header}]")

        for key, val in scalar_items.items():
            emit_kv(path, str(key), val)

        if scalar_items:
            lines.append("")

        for key, val in nested_items.items():
            walk(val, path + (str(key),))

    # Root scalars (no table header)
    root_scalars = {k: v for k, v in data.items() if not isinstance(v, dict)}
    for key, val in root_scalars.items():
        emit_kv((), str(key), val)
    if root_scalars:
        lines.append("")

    # Tables
    for key, val in data.items():
        if isinstance(val, dict):
            walk(val, (str(key),))

    return "\n".join(lines).rstrip() + "\n"
+
+ def _generate_toml_template_from_text(self, role_prefix: str, text: str) -> str:
+ """
+ Generate a Jinja2 template for a TOML file, preserving comments,
+ blank lines, and table headers by patching values in-place.
+
+ Handles inline tables like:
+ temp_targets = { cpu = 79.5, case = 72.0 }
+
+ by mapping them to:
+ temp_targets = { cpu = {{ prefix_database_temp_targets_cpu }},
+ case = {{ prefix_database_temp_targets_case }} }
+ """
+ lines = text.splitlines(keepends=True)
+ current_table: tuple[str, ...] = ()
+ out_lines: list[str] = []
+
+ for raw_line in lines:
+ line = raw_line
+ stripped = line.lstrip()
+
+ # Blank or pure comment
+ if not stripped or stripped.startswith("#"):
+ out_lines.append(raw_line)
+ continue
+
+ # Table header: [server] or [server.tls] or [[array.of.tables]]
+ if stripped.startswith("[") and "]" in stripped:
+ header = stripped
+ first_bracket = header.find("[")
+ closing_bracket = header.find("]", first_bracket + 1)
+ if first_bracket != -1 and closing_bracket != -1:
+ inner = header[first_bracket + 1 : closing_bracket].strip()
+ inner = inner.strip("[]") # handle [[table]] as well
+ parts = [p.strip() for p in inner.split(".") if p.strip()]
+ current_table = tuple(parts)
+ out_lines.append(raw_line)
+ continue
+
+ # Try key = value
+ newline = ""
+ content = raw_line
+ if content.endswith("\r\n"):
+ newline = "\r\n"
+ content = content[:-2]
+ elif content.endswith("\n"):
+ newline = content[-1]
+ content = content[:-1]
+
+ eq_index = content.find("=")
+ if eq_index == -1:
+ out_lines.append(raw_line)
+ continue
+
+ before_eq = content[:eq_index]
+ after_eq = content[eq_index + 1 :]
+
+ key = before_eq.strip()
+ if not key:
+ out_lines.append(raw_line)
+ continue
+
+ # Whitespace after '='
+ value_ws_len = len(after_eq) - len(after_eq.lstrip(" \t"))
+ leading_ws = after_eq[:value_ws_len]
+ value_and_comment = after_eq[value_ws_len:]
+
+ value_part, comment_part = self._split_inline_comment(
+ value_and_comment, {"#"}
+ )
+ raw_value = value_part.strip()
+
+ # Path for this key (table + key)
+ path = current_table + (key,)
+
+ # Special case: inline table
+ if (
+ raw_value.startswith("{")
+ and raw_value.endswith("}")
+ and tomllib is not None
+ ):
+ try:
+ # Parse the inline table as a tiny TOML document
+ mini_source = "table = " + raw_value + "\n"
+ mini_data = tomllib.loads(mini_source)["table"]
+ except Exception:
+ mini_data = None
+
+ if isinstance(mini_data, dict):
+ inner_bits: list[str] = []
+ for sub_key, sub_val in mini_data.items():
+ nested_path = path + (sub_key,)
+ nested_var = self.make_var_name(role_prefix, nested_path)
+ if isinstance(sub_val, str):
+ inner_bits.append(f'{sub_key} = "{{{{ {nested_var} }}}}"')
+ elif isinstance(sub_val, bool):
+ inner_bits.append(
+ f"{sub_key} = {{{{ {nested_var} | lower }}}}"
+ )
+ else:
+ inner_bits.append(f"{sub_key} = {{{ {nested_var} }}}")
+ replacement_value = "{ " + ", ".join(inner_bits) + " }"
+ new_content = (
+ before_eq + "=" + leading_ws + replacement_value + comment_part
+ )
+ out_lines.append(new_content + newline)
+ continue
+ # If parsing fails, fall through to normal handling
+
+ # Normal scalar value handling (including bools, numbers, strings)
+ var_name = self.make_var_name(role_prefix, path)
+ use_quotes = (
+ len(raw_value) >= 2
+ and raw_value[0] == raw_value[-1]
+ and raw_value[0] in {'"', "'"}
+ )
+
+ # Check if value is a boolean in the text
+ is_bool = raw_value.strip().lower() in ("true", "false")
+
+ if use_quotes:
+ quote_char = raw_value[0]
+ replacement_value = f"{quote_char}{{{{ {var_name} }}}}{quote_char}"
+ elif is_bool:
+ replacement_value = f"{{{{ {var_name} | lower }}}}"
+ else:
+ replacement_value = f"{{{{ {var_name} }}}}"
+
+ new_content = (
+ before_eq + "=" + leading_ws + replacement_value + comment_part
+ )
+ out_lines.append(new_content + newline)
+
+ return "".join(out_lines)
+
def _generate_toml_template_with_loops_from_text(
    self, role_prefix: str, text: str, loop_candidates: list[LoopCandidate]
) -> str:
    """
    Generate a Jinja2 template for a TOML file with loop support.

    Patches the original text in place (preserving comments and layout)
    like ``_generate_toml_template_from_text``, with two additions:

    - a ``[[table]]`` whose path matches a loop candidate is collapsed
      into a single ``{% for %}`` block built from the first sample item;
      subsequent ``[[table]]`` occurrences and their bodies are skipped
    - a ``key = [...]`` value whose path matches a candidate becomes an
      inline for loop (scalar items) or a ``| tojson`` dump otherwise
    """
    loop_paths = {candidate.path for candidate in loop_candidates}
    lines = text.splitlines(keepends=True)
    current_table: tuple[str, ...] = ()
    out_lines: list[str] = []
    skip_until_next_table = (
        False  # Track when we're inside a looped array-of-tables
    )

    for raw_line in lines:
        line = raw_line
        stripped = line.lstrip()

        # Blank or pure comment
        if not stripped or stripped.startswith("#"):
            # Only output if we're not skipping a looped table body
            if not skip_until_next_table:
                out_lines.append(raw_line)
            continue

        # Table header: [server] or [server.tls] or [[array.of.tables]]
        if stripped.startswith("[") and "]" in stripped:
            header = stripped
            # Check if it's array-of-tables ([[name]]) or regular table ([name])
            is_array_table = header.startswith("[[") and "]]" in header

            if is_array_table:
                # Extract content between [[ and ]]
                start = header.find("[[") + 2
                end = header.find("]]", start)
                inner = header[start:end].strip() if end != -1 else ""
            else:
                # Extract content between [ and ]
                start = header.find("[") + 1
                end = header.find("]", start)
                inner = header[start:end].strip() if end != -1 else ""

            if inner:
                parts = [p.strip() for p in inner.split(".") if p.strip()]
                table_path = tuple(parts)

                # Check if this is an array-of-tables that's a loop candidate
                if is_array_table and table_path in loop_paths:
                    # If we're already skipping this table, this is a
                    # subsequent occurrence of the same [[table]]
                    if skip_until_next_table and current_table == table_path:
                        # Duplicate [[table]] - skip it entirely
                        continue

                    # First occurrence - generate the loop once
                    current_table = table_path
                    candidate = next(
                        c for c in loop_candidates if c.path == table_path
                    )

                    # Loop header variables
                    collection_var = self.make_var_name(role_prefix, candidate.path)
                    item_var = candidate.loop_var

                    # Get sample item to build the per-iteration template
                    if candidate.items:
                        sample_item = candidate.items[0]

                        # Build loop
                        out_lines.append(
                            f"{{% for {item_var} in {collection_var} %}}\n"
                        )
                        out_lines.append(f"[[{'.'.join(table_path)}]]\n")

                        # Add fields from sample item; strings keep quotes
                        for key, value in sample_item.items():
                            if key == "_key":
                                continue
                            if isinstance(value, str):
                                out_lines.append(
                                    f'{key} = "{{{{ {item_var}.{key} }}}}"\n'
                                )
                            else:
                                out_lines.append(
                                    f"{key} = {{{{ {item_var}.{key} }}}}\n"
                                )

                        out_lines.append("{% endfor %}\n")

                    # Skip all content until the next different table
                    skip_until_next_table = True
                    continue
                else:
                    # Regular table or non-loop array - reset skip flag if
                    # it's a different table
                    if current_table != table_path:
                        skip_until_next_table = False
                    current_table = table_path

            out_lines.append(raw_line)
            continue

        # If we're inside a skipped array-of-tables section, skip this line
        if skip_until_next_table:
            continue

        # Try key = value; detach the trailing newline first
        newline = ""
        content = raw_line
        if content.endswith("\r\n"):
            newline = "\r\n"
            content = content[:-2]
        elif content.endswith("\n"):
            newline = content[-1]
            content = content[:-1]

        eq_index = content.find("=")
        if eq_index == -1:
            out_lines.append(raw_line)
            continue

        before_eq = content[:eq_index]
        after_eq = content[eq_index + 1 :]

        key = before_eq.strip()
        if not key:
            out_lines.append(raw_line)
            continue

        # Preserve whitespace after '='
        value_ws_len = len(after_eq) - len(after_eq.lstrip(" \t"))
        leading_ws = after_eq[:value_ws_len]
        value_and_comment = after_eq[value_ws_len:]

        value_part, comment_part = self._split_inline_comment(
            value_and_comment, {"#"}
        )
        raw_value = value_part.strip()

        # Path for this key (table + key)
        path = current_table + (key,)

        # Check if this path is a loop candidate
        if path in loop_paths:
            candidate = next(c for c in loop_candidates if c.path == path)
            collection_var = self.make_var_name(role_prefix, candidate.path)
            item_var = candidate.loop_var

            if candidate.item_schema == "scalar":
                # Scalar list loop.
                # NOTE(review): items render unquoted; string items would
                # need quoting/tojson to stay valid TOML - confirm.
                replacement_value = (
                    f"["
                    f"{{% for {item_var} in {collection_var} %}}"
                    f"{{{{ {item_var} }}}}"
                    f"{{% if not loop.last %}}, {{% endif %}}"
                    f"{{% endfor %}}"
                    f"]"
                )
            else:
                # Dict/nested loop - use tojson filter for complex arrays
                replacement_value = f"{{{{ {collection_var} | tojson }}}}"

            new_content = (
                before_eq + "=" + leading_ws + replacement_value + comment_part
            )
            out_lines.append(new_content + newline)
            continue

        # Special case: inline table -> one variable per inner key
        if (
            raw_value.startswith("{")
            and raw_value.endswith("}")
            and tomllib is not None
        ):
            try:
                # Parse the inline table as a tiny TOML document
                mini_source = "table = " + raw_value + "\n"
                mini_data = tomllib.loads(mini_source)["table"]
            except Exception:
                mini_data = None

            if isinstance(mini_data, dict):
                inner_bits: list[str] = []
                for sub_key, sub_val in mini_data.items():
                    nested_path = path + (sub_key,)
                    nested_var = self.make_var_name(role_prefix, nested_path)
                    if isinstance(sub_val, str):
                        inner_bits.append(f'{sub_key} = "{{{{ {nested_var} }}}}"')
                    elif isinstance(sub_val, bool):
                        inner_bits.append(
                            f"{sub_key} = {{{{ {nested_var} | lower }}}}"
                        )
                    else:
                        inner_bits.append(f"{sub_key} = {{{{ {nested_var} }}}}")
                replacement_value = "{ " + ", ".join(inner_bits) + " }"
                new_content = (
                    before_eq + "=" + leading_ws + replacement_value + comment_part
                )
                out_lines.append(new_content + newline)
                continue
            # If parsing fails, fall through to normal handling

        # Normal scalar value handling (including bools, numbers, strings)
        var_name = self.make_var_name(role_prefix, path)
        use_quotes = (
            len(raw_value) >= 2
            and raw_value[0] == raw_value[-1]
            and raw_value[0] in {'"', "'"}
        )

        # Check if value is a boolean in the text
        is_bool = raw_value.strip().lower() in ("true", "false")

        if use_quotes:
            quote_char = raw_value[0]
            replacement_value = f"{quote_char}{{{{ {var_name} }}}}{quote_char}"
        elif is_bool:
            replacement_value = f"{{{{ {var_name} | lower }}}}"
        else:
            replacement_value = f"{{{{ {var_name} }}}}"

        new_content = (
            before_eq + "=" + leading_ws + replacement_value + comment_part
        )
        out_lines.append(new_content + newline)

    return "".join(out_lines)
diff --git a/src/jinjaturtle/handlers/xml.py b/src/jinjaturtle/handlers/xml.py
new file mode 100644
index 0000000..fed6aba
--- /dev/null
+++ b/src/jinjaturtle/handlers/xml.py
@@ -0,0 +1,459 @@
+from __future__ import annotations
+
+from collections import Counter, defaultdict
+from pathlib import Path
+from typing import Any
+import xml.etree.ElementTree as ET # nosec
+
+from .base import BaseHandler
+from ..loop_analyzer import LoopCandidate
+
+
class XmlHandler(BaseHandler):
    """
    XML handler that can generate both scalar templates and loop-based templates.
    """

    fmt = "xml"  # format identifier used for dispatch
def parse(self, path: Path) -> ET.Element:
    """Parse an XML file and return its root element.

    Uses an explicit parser with ``insert_comments=False`` so comment
    nodes are not inserted into the parsed tree.
    """
    source = path.read_text(encoding="utf-8")
    builder = ET.TreeBuilder(insert_comments=False)
    xml_parser = ET.XMLParser(target=builder)  # nosec B314
    xml_parser.feed(source)
    return xml_parser.close()
+
def flatten(self, parsed: Any) -> list[tuple[tuple[str, ...], Any]]:
    """Flatten a parsed XML tree into (path, value) pairs.

    Raises:
        TypeError: if ``parsed`` is not an Element.
    """
    if isinstance(parsed, ET.Element):
        return self._flatten_xml(parsed)
    raise TypeError("XML parser result must be an Element")
+
def generate_jinja2_template(
    self,
    parsed: Any,
    role_prefix: str,
    original_text: str | None = None,
) -> str:
    """Scalar-only template generation (no Jinja2 for-loops).

    When no original text is supplied the parsed tree is serialized back
    to XML first, then templated the same way.

    Raises:
        TypeError: if ``parsed`` is not an Element when serialization is needed.
    """
    if original_text is None:
        if not isinstance(parsed, ET.Element):
            raise TypeError("XML parser result must be an Element")
        original_text = ET.tostring(parsed, encoding="unicode")
    return self._generate_xml_template_from_text(role_prefix, original_text)
+
def generate_jinja2_template_with_loops(
    self,
    parsed: Any,
    role_prefix: str,
    original_text: str | None,
    loop_candidates: list[LoopCandidate],
) -> str:
    """Generate a template, emitting Jinja2 for-loops for loop candidates.

    When no original text is supplied the parsed tree is serialized back
    to XML first, then templated the same way.

    Raises:
        TypeError: if ``parsed`` is not an Element when serialization is needed.
    """
    if original_text is None:
        if not isinstance(parsed, ET.Element):
            raise TypeError("XML parser result must be an Element")
        original_text = ET.tostring(parsed, encoding="unicode")
    return self._generate_xml_template_with_loops_from_text(
        role_prefix, original_text, loop_candidates
    )
+
+ def _flatten_xml(self, root: ET.Element) -> list[tuple[tuple[str, ...], Any]]:
+ """Flatten an XML tree into (path, value) pairs."""
+ items: list[tuple[tuple[str, ...], Any]] = []
+
+ def walk(elem: ET.Element, path: tuple[str, ...]) -> None:
+ # Attributes
+ for attr_name, attr_val in elem.attrib.items():
+ attr_path = path + (f"@{attr_name}",)
+ items.append((attr_path, attr_val))
+
+ # Children
+ children = [c for c in list(elem) if isinstance(c.tag, str)]
+
+ # Text content
+ text = (elem.text or "").strip()
+ if text:
+ if not elem.attrib and not children:
+ items.append((path, text))
+ else:
+ items.append((path + ("value",), text))
+
+ # Repeated siblings get an index; singletons just use the tag
+ counts = Counter(child.tag for child in children)
+ index_counters: dict[str, int] = defaultdict(int)
+
+ for child in children:
+ tag = child.tag
+ if counts[tag] > 1:
+ idx = index_counters[tag]
+ index_counters[tag] += 1
+ child_path = path + (tag, str(idx))
+ else:
+ child_path = path + (tag,)
+ walk(child, child_path)
+
+ walk(root, ())
+ return items
+
+ def _split_xml_prolog(self, text: str) -> tuple[str, str]:
+ """Split XML into (prolog, body)."""
+ i = 0
+ n = len(text)
+ prolog_parts: list[str] = []
+
+ while i < n:
+ while i < n and text[i].isspace():
+ prolog_parts.append(text[i])
+ i += 1
+ if i >= n:
+ break
+
+ if text.startswith("", i):
+ end = text.find("?>", i + 2)
+ if end == -1:
+ break
+ prolog_parts.append(text[i : end + 2])
+ i = end + 2
+ continue
+
+ if text.startswith("", i + 4)
+ if end == -1:
+ break
+ prolog_parts.append(text[i : end + 3])
+ i = end + 3
+ continue
+
+ if text.startswith("", i + 9)
+ if end == -1:
+ break
+ prolog_parts.append(text[i : end + 1])
+ i = end + 1
+ continue
+
+ if text[i] == "<":
+ break
+
+ break
+
+ return "".join(prolog_parts), text[i:]
+
def _apply_jinja_to_xml_tree(
    self,
    role_prefix: str,
    root: ET.Element,
    loop_candidates: list[LoopCandidate] | None = None,
) -> None:
    """
    Mutate XML tree in-place, replacing values with Jinja expressions.

    Every attribute value and text node becomes a ``{{ var }}`` placeholder
    named after its path.  If loop_candidates is provided, repeated elements
    matching a candidate are removed and replaced with a ``LOOP:<tag>``
    comment marker; a later text-processing pass turns that marker into a
    real {% for %} loop.
    """

    # Build a map of loop paths for quick lookup
    loop_paths = {}
    if loop_candidates:
        for candidate in loop_candidates:
            loop_paths[candidate.path] = candidate

    def walk(elem: ET.Element, path: tuple[str, ...]) -> None:
        # Attributes: each becomes its own Jinja variable.
        for attr_name in list(elem.attrib.keys()):
            attr_path = path + (f"@{attr_name}",)
            var_name = self.make_var_name(role_prefix, attr_path)
            elem.set(attr_name, f"{{{{ {var_name} }}}}")

        # Children (string tags only, which excludes comment nodes)
        children = [c for c in list(elem) if isinstance(c.tag, str)]

        # Text content: pure leaves use the element path itself, mixed
        # elements store their text under path + ("value",) - same layout
        # as _flatten_xml.
        text = (elem.text or "").strip()
        if text:
            if not elem.attrib and not children:
                text_path = path
            else:
                text_path = path + ("value",)
            var_name = self.make_var_name(role_prefix, text_path)
            elem.text = f"{{{{ {var_name} }}}}"

        # Handle children - check for loops first
        counts = Counter(child.tag for child in children)
        index_counters: dict[str, int] = defaultdict(int)

        # Check each tag to see if it's a loop candidate
        processed_tags = set()

        for child in children:
            tag = child.tag

            # Skip if we've already processed this tag as a loop
            if tag in processed_tags:
                continue

            child_path = path + (tag,)

            # Check if this is a loop candidate
            if child_path in loop_paths:
                # Mark this tag as processed
                processed_tags.add(tag)

                # Remove all children with this tag
                for child_to_remove in [c for c in children if c.tag == tag]:
                    elem.remove(child_to_remove)

                # Create a loop comment/marker; the actual loop generation
                # happens later during text post-processing
                loop_marker = ET.Comment(f"LOOP:{tag}")
                elem.append(loop_marker)

            elif counts[tag] > 1:
                # Multiple children but not a loop candidate - use indexed paths
                idx = index_counters[tag]
                index_counters[tag] += 1
                indexed_path = path + (tag, str(idx))
                walk(child, indexed_path)
            else:
                # Single child
                walk(child, child_path)

    walk(root, ())
+
+ def _generate_xml_template_from_text(self, role_prefix: str, text: str) -> str:
+ """Generate scalar-only Jinja2 template."""
+ prolog, body = self._split_xml_prolog(text)
+
+ parser = ET.XMLParser(target=ET.TreeBuilder(insert_comments=True)) # nosec B314
+ parser.feed(body)
+ root = parser.close()
+
+ self._apply_jinja_to_xml_tree(role_prefix, root)
+
+ indent = getattr(ET, "indent", None)
+ if indent is not None:
+ indent(root, space=" ") # type: ignore[arg-type]
+
+ xml_body = ET.tostring(root, encoding="unicode")
+ return prolog + xml_body
+
+ def _generate_xml_template_with_loops_from_text(
+ self,
+ role_prefix: str,
+ text: str,
+ loop_candidates: list[LoopCandidate],
+ ) -> str:
+ """Generate Jinja2 template with for loops."""
+
+ prolog, body = self._split_xml_prolog(text)
+
+ # Parse with comments preserved
+ parser = ET.XMLParser(target=ET.TreeBuilder(insert_comments=True)) # nosec B314
+ parser.feed(body)
+ root = parser.close()
+
+ # Apply Jinja transformations (including loop markers)
+ self._apply_jinja_to_xml_tree(role_prefix, root, loop_candidates)
+
+ # Convert to string
+ indent = getattr(ET, "indent", None)
+ if indent is not None:
+ indent(root, space=" ") # type: ignore[arg-type]
+
+ xml_body = ET.tostring(root, encoding="unicode")
+
+ # Post-process to replace loop markers with actual Jinja loops
+ xml_body = self._insert_xml_loops(xml_body, role_prefix, loop_candidates, root)
+
+ return prolog + xml_body
+
+ def _insert_xml_loops(
+ self,
+ xml_str: str,
+ role_prefix: str,
+ loop_candidates: list[LoopCandidate],
+ root: ET.Element,
+ ) -> str:
+ """
+ Post-process XML string to insert Jinja2 for loops.
+
+ This replaces markers with actual loop constructs.
+ """
+
+ # Build a sample element for each loop to use as template
+ lines = xml_str.split("\n")
+ result_lines = []
+
+ for line in lines:
+ # Check if this line contains a loop marker
+ if "", start)
+ tag_name = line[start:end].strip()
+
+ # Find matching loop candidate
+ candidate = None
+ for cand in loop_candidates:
+ if cand.path and cand.path[-1] == tag_name:
+ candidate = cand
+ break
+
+ if candidate:
+ # Get indentation from current line
+ indent_level = len(line) - len(line.lstrip())
+ indent_str = " " * indent_level
+
+ # Generate loop variable name
+ collection_var = self.make_var_name(role_prefix, candidate.path)
+ item_var = candidate.loop_var
+
+ # Create sample element with ALL possible fields from ALL items
+ if candidate.items:
+ # Merge all items to get the union of all fields
+ merged_dict = self._merge_dicts_for_template(candidate.items)
+
+ sample_elem = self._dict_to_xml_element(
+ tag_name, merged_dict, item_var
+ )
+
+ # Apply indentation to the sample element
+ ET.indent(sample_elem, space=" ")
+
+ # Convert sample to string
+ sample_str = ET.tostring(
+ sample_elem, encoding="unicode"
+ ).strip()
+
+ # Add proper indentation to each line of the sample
+ sample_lines = sample_str.split("\n")
+
+ # Build loop
+ result_lines.append(
+ f"{indent_str}{{% for {item_var} in {collection_var} %}}"
+ )
+ # Add each line of the sample with proper indentation
+ for sample_line in sample_lines:
+ result_lines.append(f"{indent_str} {sample_line}")
+ result_lines.append(f"{indent_str}{{% endfor %}}")
+ else:
+ # Keep the marker if we can't find the candidate
+ result_lines.append(line)
+ else:
+ result_lines.append(line)
+
+ # Post-process to replace and with Jinja2 conditionals
+ final_lines = []
+ for line in result_lines:
+ # Replace with {% if var.field is defined %}
+ if "", start)
+ condition = line[start:end]
+ indent = len(line) - len(line.lstrip())
+ final_lines.append(f"{' ' * indent}{{% if {condition} is defined %}}")
+ # Replace with {% endif %}
+ elif "
+
+
+
+
+ web-log
+ Access log messages grouped.
+
+
+
+ 31100
+ ^2|^3
+ is_simple_http_request
+ Ignored URLs (simple queries).
+
+
+
+ 31100
+ ^4
+ Web server 400 error code.
+
+
+
+ 31101
+ \.jpg$|\.gif$|favicon\.ico$|\.png$|robots\.txt$|\.css$|\.js$|\.jpeg$
+ is_simple_http_request
+ Ignored extensions on 400 error codes.
+
+
+
+ 31100,31108
+ =select%20|select\+|insert%20|%20from%20|%20where%20|union%20|
+ union\+|where\+|null,null|xp_cmdshell
+ SQL injection attempt.
+ attack,sql_injection,
+
+
+
+ 31100
+
+
+ %027|%00|%01|%7f|%2E%2E|%0A|%0D|\.\./\.\.|\.\.\\\.\.|echo;|
+ cmd\.exe|root\.exe|_mem_bin|msadc|/winnt/|/boot\.ini|
+ /x90/|default\.ida|/sumthin|nsiislog\.dll|chmod%|wget%|cd%20|
+ exec%20|\.\./\.\.//|%5C\.\./%5C|\./\./\./\./|2e%2e%5c%2e|\\x5C\\x5C
+ Common web attack.
+ attack,
+
+
+
+ 31100
+ %3Cscript|%3C%2Fscript|script>|script%3E|SRC=javascript|IMG%20|
+ %20ONLOAD=|INPUT%20|iframe%20
+ XSS (Cross Site Scripting) attempt.
+ attack,
+
+
+
+ 31103, 31104, 31105
+ ^200
+ A web attack returned code 200 (success).
+ attack,
+
+
+
+ 31100
+ \?-d|\?-s|\?-a|\?-b|\?-w
+ PHP CGI-bin vulnerability attempt.
+ attack,
+
+
+
+ 31100
+ \+as\+varchar
+ %2Bchar\(\d+\)%2Bchar\(\d+\)%2Bchar\(\d+\)%2Bchar\(\d+\)%2Bchar\(\d+\)%2Bchar\(\d+\)
+ MSSQL Injection attempt (/ur.php, urchin.js)
+ attack,
+
+
+
+
+
+ 31103, 31104, 31105
+ ^/search\.php\?search=|^/index\.php\?searchword=
+ Ignored URLs for the web attacks
+
+
+
+ 31100
+ URL too long. Higher than allowed on most
+ browsers. Possible attack.
+ invalid_access,
+
+
+
+
+
+ 31100
+ ^50
+ Web server 500 error code (server error).
+
+
+
+ 31120
+ ^501
+ Web server 501 error code (Not Implemented).
+
+
+
+ 31120
+ ^500
+ alert_by_email
+ Web server 500 error code (Internal Error).
+ system_error,
+
+
+
+ 31120
+ ^503
+ alert_by_email
+ Web server 503 error code (Service unavailable).
+
+
+
+
+
+ 31101
+ is_valid_crawler
+ Ignoring google/msn/yahoo bots.
+
+
+
+
+ 31101
+ ^499
+ Ignored 499's on nginx.
+
+
+
+
+ 31101
+
+ Multiple web server 400 error codes
+ from same source ip.
+ web_scan,recon,
+
+
+
+ 31103
+
+ Multiple SQL injection attempts from same
+ source ip.
+ attack,sql_injection,
+
+
+
+ 31104
+
+ Multiple common web attacks from same source ip.
+ attack,
+
+
+
+ 31105
+
+ Multiple XSS (Cross Site Scripting) attempts
+ from same source ip.
+ attack,
+
+
+
+ 31121
+
+ Multiple web server 501 error code (Not Implemented).
+ web_scan,recon,
+
+
+
+ 31122
+
+ Multiple web server 500 error code (Internal Error).
+ system_error,
+
+
+
+ 31123
+
+ Multiple web server 503 error code (Service unavailable).
+ web_scan,recon,
+
+
+
+ 31100
+ =%27|select%2B|insert%2B|%2Bfrom%2B|%2Bwhere%2B|%2Bunion%2B
+ SQL injection attempt.
+ attack,sqlinjection,
+
+
+
+ 31100
+ %EF%BC%87|%EF%BC%87|%EF%BC%87|%2531|%u0053%u0045
+ SQL injection attempt.
+ attack,sqlinjection,
+
+
+
diff --git a/tests/test_base_handler.py b/tests/test_base_handler.py
new file mode 100644
index 0000000..5ee761f
--- /dev/null
+++ b/tests/test_base_handler.py
@@ -0,0 +1,34 @@
+from __future__ import annotations
+
+from pathlib import Path
+
+import pytest
+
+from jinjaturtle.handlers.base import BaseHandler
+
+
def test_split_inline_comment_handles_quoted_hash():
    # A '#' inside a quoted value must not start a comment,
    # while the unquoted '#' after it must.
    handler = BaseHandler()
    value, comment = handler._split_inline_comment(
        " 'foo # not comment' # real", {"#"}
    )
    assert "not comment" in value
    assert comment.strip() == "# real"
+
+
def test_base_handler_abstract_methods_raise_not_implemented(tmp_path: Path):
    """
    Every BaseHandler stub method must raise NotImplementedError.
    This covers the stub implementations.
    """
    handler = BaseHandler()
    target = tmp_path / "dummy.cfg"

    for call in (
        lambda: handler.parse(target),
        lambda: handler.flatten(object()),
        lambda: handler.generate_jinja2_template(parsed=object(), role_prefix="role"),
    ):
        with pytest.raises(NotImplementedError):
            call()
diff --git a/tests/test_cli.py b/tests/test_cli.py
index 4df5bf0..a880135 100644
--- a/tests/test_cli.py
+++ b/tests/test_cli.py
@@ -1,10 +1,6 @@
from __future__ import annotations
-import sys
from pathlib import Path
-
-import pytest
-
from jinjaturtle import cli
SAMPLES_DIR = Path(__file__).parent / "samples"
@@ -18,7 +14,7 @@ def test_cli_stdout_toml(capsys):
cfg_path = SAMPLES_DIR / "tom.toml"
exit_code = cli._main([str(cfg_path), "-r", "jinjaturtle"])
- assert exit_code == 0
+ assert exit_code == 0
captured = capsys.readouterr()
out = captured.out
@@ -52,7 +48,7 @@ def test_cli_writes_output_files(tmp_path, capsys):
]
)
- assert exit_code == 0
+ assert exit_code == 0
assert defaults_path.is_file()
assert template_path.is_file()
@@ -66,20 +62,3 @@ def test_cli_writes_output_files(tmp_path, capsys):
# When writing to files, we shouldn't print the big headers
assert "# defaults/main.yml" not in captured.out
assert "# config.j2" not in captured.out
-
-
-def test_main_wrapper_exits_with_zero(monkeypatch):
- """
- Cover the main() wrapper that raises SystemExit.
- """
- cfg_path = SAMPLES_DIR / "tom.toml"
- monkeypatch.setattr(
- sys,
- "argv",
- ["jinjaturtle", str(cfg_path), "-r", "jinjaturtle"],
- )
-
- with pytest.raises(SystemExit) as exc:
- cli.main()
-
- assert exc.value.code == 0
diff --git a/tests/test_core.py b/tests/test_core.py
deleted file mode 100644
index 7056518..0000000
--- a/tests/test_core.py
+++ /dev/null
@@ -1,375 +0,0 @@
-from __future__ import annotations
-
-from pathlib import Path
-import configparser
-import pytest
-import textwrap
-import yaml
-
-import jinjaturtle.core as core
-from jinjaturtle.core import (
- detect_format,
- parse_config,
- flatten_config,
- generate_defaults_yaml,
- generate_template,
- make_var_name,
-)
-
-SAMPLES_DIR = Path(__file__).parent / "samples"
-
-
-def test_make_var_name_basic():
- # simple sanity checks on the naming rules
- assert (
- make_var_name("jinjaturtle", ("somesection", "foo"))
- == "jinjaturtle_somesection_foo"
- )
- assert (
- make_var_name("JinjaTurtle", ("Other-Section", "some value"))
- == "jinjaturtle_other_section_some_value"
- )
- # no trailing underscores, all lowercase, no spaces
- name = make_var_name("MyRole", (" Section Name ", "Key-Name "))
- assert name == name.lower()
- assert " " not in name
- assert not name.endswith("_")
-
-
-def test_make_var_name_empty_path_returns_prefix():
- # Cover the branch where there are no path components.
- assert make_var_name("MyRole", ()) == "myrole"
-
-
-def test_detect_format_explicit_overrides_suffix(tmp_path: Path):
- # Explicit format should win over file suffix.
- cfg_path = tmp_path / "config.ini"
- cfg_path.write_text("[section]\nkey=value\n", encoding="utf-8")
-
- fmt = detect_format(cfg_path, explicit="toml")
- assert fmt == "toml"
-
-
-def test_detect_format_fallback_ini(tmp_path: Path):
- # Unknown suffix should fall back to "ini".
- cfg_path = tmp_path / "weird.cnf"
- cfg_path.write_text("[section]\nkey=value\n", encoding="utf-8")
-
- fmt, parsed = parse_config(cfg_path) # no explicit fmt
- assert fmt == "ini"
- # parsed should be an INI ConfigParser with our section/key
- flat = flatten_config(fmt, parsed)
- assert any(path == ("section", "key") for path, _ in flat)
-
-
-def test_toml_sample_roundtrip():
- toml_path = SAMPLES_DIR / "tom.toml"
- assert toml_path.is_file(), f"Missing sample TOML file: {toml_path}"
-
- fmt, parsed = parse_config(toml_path)
- assert fmt == "toml"
-
- flat_items = flatten_config(fmt, parsed)
- assert flat_items
-
- defaults_yaml = generate_defaults_yaml("jinjaturtle", flat_items)
- defaults = yaml.safe_load(defaults_yaml)
-
- # defaults should be a non-empty dict
- assert isinstance(defaults, dict)
- assert defaults, "Expected non-empty defaults for TOML sample"
-
- # all keys should be lowercase, start with prefix, and have no spaces
- for key in defaults:
- assert key.startswith("jinjaturtle_")
- assert key == key.lower()
- assert " " not in key
-
- # template generation ā **now with original_text**
- original_text = toml_path.read_text(encoding="utf-8")
- template = generate_template(
- fmt, parsed, "jinjaturtle", original_text=original_text
- )
- assert isinstance(template, str)
- assert template.strip()
-
- # comments from the original file should now be preserved
- assert "# This is a TOML document" in template
-
- # each default variable name should appear in the template as a Jinja placeholder
- for var_name in defaults:
- assert (
- var_name in template
- ), f"Variable {var_name} not referenced in TOML template"
-
-
-def test_ini_php_sample_roundtrip():
- ini_path = SAMPLES_DIR / "php.ini"
- assert ini_path.is_file(), f"Missing sample INI file: {ini_path}"
-
- fmt, parsed = parse_config(ini_path)
- assert fmt == "ini"
-
- flat_items = flatten_config(fmt, parsed)
- assert flat_items, "Expected at least one flattened item from php.ini sample"
-
- defaults_yaml = generate_defaults_yaml("php", flat_items)
- defaults = yaml.safe_load(defaults_yaml)
-
- # defaults should be a non-empty dict
- assert isinstance(defaults, dict)
- assert defaults, "Expected non-empty defaults for php.ini sample"
-
- # all keys should be lowercase, start with prefix, and have no spaces
- for key in defaults:
- assert key.startswith("php_")
- assert key == key.lower()
- assert " " not in key
-
- # template generation
- original_text = ini_path.read_text(encoding="utf-8")
- template = generate_template(fmt, parsed, "php", original_text=original_text)
- assert "; About this file" in template
- assert isinstance(template, str)
- assert template.strip(), "Template for php.ini sample should not be empty"
-
- # each default variable name should appear in the template as a Jinja placeholder
- for var_name in defaults:
- assert (
- var_name in template
- ), f"Variable {var_name} not referenced in INI template"
-
-
-def test_formats_match_expected_extensions():
- """
- Sanity check that format detection lines up with the filenames
- weāre using for the samples.
- """
- toml_path = SAMPLES_DIR / "tom.toml"
- ini_path = SAMPLES_DIR / "php.ini"
-
- fmt_toml, _ = parse_config(toml_path)
- fmt_ini, _ = parse_config(ini_path)
-
- assert fmt_toml == "toml"
- assert fmt_ini == "ini"
-
-
-def test_parse_config_toml_missing_tomllib(monkeypatch):
- """
- Force tomllib to None to hit the RuntimeError branch when parsing TOML.
- """
- toml_path = SAMPLES_DIR / "tom.toml"
-
- # Simulate an environment without tomllib/tomli
- monkeypatch.setattr(core, "tomllib", None)
-
- with pytest.raises(RuntimeError) as exc:
- core.parse_config(toml_path, fmt="toml")
- assert "tomllib/tomli is required" in str(exc.value)
-
-
-def test_parse_config_unsupported_format(tmp_path: Path):
- """
- Hit the ValueError in parse_config when fmt is not a supported format.
- """
- cfg_path = tmp_path / "config.whatever"
- cfg_path.write_text("", encoding="utf-8")
-
- with pytest.raises(ValueError):
- parse_config(cfg_path, fmt="bogus")
-
-
-def test_generate_template_type_and_format_errors():
- """
- Exercise the error branches in generate_template:
- - toml with non-dict parsed
- - ini with non-ConfigParser parsed
- - yaml with wrong parsed type
- - completely unsupported fmt (with and without original_text)
- """
- # wrong type for TOML
- with pytest.raises(TypeError):
- generate_template("toml", parsed="not a dict", role_prefix="role")
-
- # wrong type for INI
- with pytest.raises(TypeError):
- generate_template("ini", parsed={"not": "a configparser"}, role_prefix="role")
-
- # wrong type for YAML
- with pytest.raises(TypeError):
- generate_template("yaml", parsed=None, role_prefix="role")
-
- # unsupported format, no original_text
- with pytest.raises(ValueError):
- generate_template("bogusfmt", parsed=None, role_prefix="role")
-
- # unsupported format, with original_text
- with pytest.raises(ValueError):
- generate_template(
- "bogusfmt",
- parsed=None,
- role_prefix="role",
- original_text="foo=bar",
- )
-
-
-def test_normalize_default_value_true_false_strings():
- # 'true'/'false' strings should be preserved as strings and double-quoted in YAML.
- flat_items = [
- (("section", "foo"), "true"),
- (("section", "bar"), "FALSE"),
- ]
- defaults_yaml = generate_defaults_yaml("role", flat_items)
- data = yaml.safe_load(defaults_yaml)
- assert data["role_section_foo"] == "true"
- assert data["role_section_bar"] == "FALSE"
-
-
-def test_split_inline_comment_handles_quoted_hash():
- # The '#' inside quotes should not start a comment; the one outside should.
- text = " 'foo # not comment' # real"
- value, comment = core._split_inline_comment(text, {"#"})
- assert "not comment" in value
- assert comment.strip() == "# real"
-
-
-def test_generate_template_fallback_toml_and_ini():
- # When original_text is not provided, generate_template should use the
- # older fallback generators based on the parsed structures.
- parsed_toml = {
- "title": "Example",
- "server": {"port": 8080, "host": "127.0.0.1"},
- "logging": {
- "file": {"path": "/tmp/app.log"}
- }, # nested table to hit recursive walk
- }
- tmpl_toml = generate_template("toml", parsed=parsed_toml, role_prefix="role")
- assert "[server]" in tmpl_toml
- assert "role_server_port" in tmpl_toml
- assert "[logging]" in tmpl_toml or "[logging.file]" in tmpl_toml
-
- parser = configparser.ConfigParser()
- # foo is quoted in the INI text to hit the "preserve quotes" branch
- parser["section"] = {"foo": '"bar"', "num": "42"}
- tmpl_ini = generate_template("ini", parsed=parser, role_prefix="role")
- assert "[section]" in tmpl_ini
- assert "role_section_foo" in tmpl_ini
- assert '"{{ role_section_foo }}"' in tmpl_ini # came from quoted INI value
-
-
-def test_generate_ini_template_from_text_edge_cases():
- # Cover CRLF newlines, lines without '=', and lines with no key before '='.
- text = "[section]\r\nkey=value\r\nnoequals\r\n = bare\r\n"
- tmpl = core._generate_ini_template_from_text("role", text)
- # We don't care about exact formatting here, just that it runs and
- # produces some reasonable output.
- assert "[section]" in tmpl
- assert "role_section_key" in tmpl
- # The "noequals" line should be preserved as-is.
- assert "noequals" in tmpl
- # The " = bare" line has no key and should be left untouched.
- assert " = bare" in tmpl
-
-
-def test_generate_toml_template_from_text_edge_cases():
- # Cover CRLF newlines, lines without '=', empty keys, and inline tables
- # that both parse successfully and fail parsing.
- text = (
- "# comment\r\n"
- "[table]\r\n"
- "noequals\r\n"
- " = 42\r\n"
- 'inline_good = { name = "abc", value = 1 }\r\n'
- "inline_bad = { invalid = }\r\n"
- )
- tmpl = core._generate_toml_template_from_text("role", text)
- # The good inline table should expand into two separate variables.
- assert "role_table_inline_good_name" in tmpl
- assert "role_table_inline_good_value" in tmpl
- # The bad inline table should fall back to scalar handling.
- assert "role_table_inline_bad" in tmpl
- # Ensure the lines without '=' / empty key were handled without exploding.
- assert "[table]" in tmpl
- assert "noequals" in tmpl
-
-
-def test_yaml_roundtrip_with_list_and_comment(tmp_path: Path):
- yaml_text = """
- # Top comment
- foo: "bar"
-
- blah:
- - something
- - else
- """
- cfg_path = tmp_path / "config.yaml"
- cfg_path.write_text(textwrap.dedent(yaml_text), encoding="utf-8")
-
- fmt, parsed = parse_config(cfg_path)
- assert fmt == "yaml"
-
- flat_items = flatten_config(fmt, parsed)
- defaults_yaml = generate_defaults_yaml("foobar", flat_items)
- defaults = yaml.safe_load(defaults_yaml)
-
- # Defaults: keys are flattened with indices
- assert defaults["foobar_foo"] == "bar"
- assert defaults["foobar_blah_0"] == "something"
- assert defaults["foobar_blah_1"] == "else"
-
- # Template generation (preserving comments)
- original_text = cfg_path.read_text(encoding="utf-8")
- template = generate_template(fmt, parsed, "foobar", original_text=original_text)
-
- # Comment preserved
- assert "# Top comment" in template
-
- # Scalar replacement
- assert "foo:" in template
- assert "foobar_foo" in template
-
- # List items use indexed vars, not "item"
- assert "foobar_blah_0" in template
- assert "foobar_blah_1" in template
- assert "{{ foobar_blah }}" not in template
- assert "foobar_blah_item" not in template
-
-
-def test_json_roundtrip(tmp_path: Path):
- json_text = """
- {
- "foo": "bar",
- "nested": {
- "a": 1,
- "b": true
- },
- "list": [10, 20]
- }
- """
- cfg_path = tmp_path / "config.json"
- cfg_path.write_text(textwrap.dedent(json_text), encoding="utf-8")
-
- fmt, parsed = parse_config(cfg_path)
- assert fmt == "json"
-
- flat_items = flatten_config(fmt, parsed)
- defaults_yaml = generate_defaults_yaml("foobar", flat_items)
- defaults = yaml.safe_load(defaults_yaml)
-
- # Defaults: nested keys and list indices
- assert defaults["foobar_foo"] == "bar"
- assert defaults["foobar_nested_a"] == 1
- # Bool normalized to string "true"
- assert defaults["foobar_nested_b"] == "true"
- assert defaults["foobar_list_0"] == 10
- assert defaults["foobar_list_1"] == 20
-
- # Template generation (JSON has no comments, so we just rebuild)
- template = generate_template(fmt, parsed, "foobar")
-
- assert '"foo": "{{ foobar_foo }}"' in template
- assert "foobar_nested_a" in template
- assert "foobar_nested_b" in template
- assert "foobar_list_0" in template
- assert "foobar_list_1" in template
diff --git a/tests/test_core_utils.py b/tests/test_core_utils.py
new file mode 100644
index 0000000..c8e41e1
--- /dev/null
+++ b/tests/test_core_utils.py
@@ -0,0 +1,193 @@
+from __future__ import annotations
+
+from pathlib import Path
+
+import pytest
+import yaml
+
+import jinjaturtle.core as core
+from jinjaturtle.core import (
+ detect_format,
+ parse_config,
+ flatten_config,
+ generate_ansible_yaml,
+ generate_jinja2_template,
+ make_var_name,
+)
+
+SAMPLES_DIR = Path(__file__).parent / "samples"
+
+
+def test_make_var_name_basic():
+ # simple sanity checks on the naming rules
+ assert (
+ make_var_name("jinjaturtle", ("somesection", "foo"))
+ == "jinjaturtle_somesection_foo"
+ )
+ assert (
+ make_var_name("JinjaTurtle", ("Other-Section", "some value"))
+ == "jinjaturtle_other_section_some_value"
+ )
+ # no trailing underscores, all lowercase, no spaces
+ name = make_var_name("MyRole", (" Section Name ", "Key-Name "))
+ assert name == name.lower()
+ assert " " not in name
+ assert not name.endswith("_")
+
+
+def test_make_var_name_empty_path_returns_prefix():
+ # Cover the branch where there are no path components.
+ assert make_var_name("MyRole", ()) == "myrole"
+
+
+def test_detect_format_explicit_overrides_suffix(tmp_path: Path):
+ # Explicit format should win over file suffix.
+ cfg_path = tmp_path / "config.ini"
+ cfg_path.write_text("[section]\nkey=value\n", encoding="utf-8")
+
+ fmt = detect_format(cfg_path, explicit="toml")
+ assert fmt == "toml"
+
+
+def test_detect_format_fallback_ini(tmp_path: Path):
+ # Unknown suffix should fall back to "ini".
+ cfg_path = tmp_path / "weird.cnf"
+ cfg_path.write_text("[section]\nkey=value\n", encoding="utf-8")
+
+ fmt, parsed = parse_config(cfg_path) # no explicit fmt
+ assert fmt == "ini"
+ # parsed should be an INI ConfigParser with our section/key
+ flat = flatten_config(fmt, parsed)
+ assert any(path == ("section", "key") for path, _ in flat)
+
+
+def test_formats_match_expected_extensions():
+ """
+ Sanity check that format detection lines up with the filenames
+ we're using for the samples.
+ """
+ toml_path = SAMPLES_DIR / "tom.toml"
+ ini_path = SAMPLES_DIR / "php.ini"
+ xml_path = SAMPLES_DIR / "ossec.xml"
+
+ fmt_toml, _ = parse_config(toml_path)
+ fmt_ini, _ = parse_config(ini_path)
+ fmt_xml, _ = parse_config(xml_path)
+
+ assert fmt_toml == "toml"
+ assert fmt_ini == "ini"
+ assert fmt_xml == "xml"
+
+
+def test_parse_config_unsupported_format(tmp_path: Path):
+ """
+ Hit the ValueError in parse_config when fmt is not a supported format.
+ """
+ cfg_path = tmp_path / "config.whatever"
+ cfg_path.write_text("", encoding="utf-8")
+
+ with pytest.raises(ValueError):
+ parse_config(cfg_path, fmt="bogus")
+
+
+def test_generate_jinja2_template_type_and_format_errors():
+ """
+ Exercise the error branches in generate_jinja2_template:
+ - toml with non-dict parsed
+ - ini with non-ConfigParser parsed
+ - yaml with wrong parsed type
+ - json with wrong parsed type
+ - completely unsupported fmt (with and without original_text)
+ """
+ # wrong type for TOML
+ with pytest.raises(TypeError):
+ generate_jinja2_template("toml", parsed="not a dict", role_prefix="role")
+
+ # wrong type for INI
+ with pytest.raises(TypeError):
+ generate_jinja2_template(
+ "ini", parsed={"not": "a configparser"}, role_prefix="role"
+ )
+
+ # wrong type for YAML
+ with pytest.raises(TypeError):
+ generate_jinja2_template("yaml", parsed=None, role_prefix="role")
+
+ # wrong type for JSON
+ with pytest.raises(TypeError):
+ generate_jinja2_template("json", parsed=None, role_prefix="role")
+
+ # unsupported format, no original_text
+ with pytest.raises(ValueError):
+ generate_jinja2_template("bogusfmt", parsed=None, role_prefix="role")
+
+ # unsupported format, with original_text
+ with pytest.raises(ValueError):
+ generate_jinja2_template(
+ "bogusfmt",
+ parsed=None,
+ role_prefix="role",
+ original_text="foo=bar",
+ )
+
+
+def test_normalize_default_value_true_false_strings():
+ # 'true'/'false' strings should be preserved as strings and double-quoted in YAML.
+ flat_items = [
+ (("section", "foo"), "true"),
+ (("section", "bar"), "FALSE"),
+ ]
+ ansible_yaml = generate_ansible_yaml("role", flat_items)
+ data = yaml.safe_load(ansible_yaml)
+ assert data["role_section_foo"] == "true"
+ assert data["role_section_bar"] == "FALSE"
+
+
+def test_fallback_str_representer_for_unknown_type():
+ """
+ Ensure that the _fallback_str_representer is used for objects that
+ PyYAML doesn't know how to represent.
+ """
+
+ class Weird:
+ def __str__(self) -> str:
+ return "weird-value"
+
+ data = {"foo": Weird()}
+
+ dumped = yaml.dump(
+ data,
+ Dumper=core._TurtleDumper,
+ sort_keys=False,
+ default_flow_style=False,
+ )
+
+ # It should serialize without error, and the string form should appear.
+ assert "weird-value" in dumped
+
+
+def test_normalize_default_value_bool_inputs_preserved_as_booleans():
+ """
+ Boolean values are now preserved as booleans in YAML (not stringified).
+ This supports proper type preservation for JSON and other formats.
+ """
+ flat_items = [
+ (("section", "flag_true"), True),
+ (("section", "flag_false"), False),
+ ]
+ ansible_yaml = generate_ansible_yaml("role", flat_items)
+ data = yaml.safe_load(ansible_yaml)
+
+ # Booleans are now preserved as booleans
+ assert data["role_section_flag_true"] is True
+ assert data["role_section_flag_false"] is False
+
+
+def test_flatten_config_unsupported_format():
+ """
+ Calling flatten_config with an unknown fmt should raise ValueError.
+ """
+ with pytest.raises(ValueError) as exc:
+ flatten_config("bogusfmt", parsed=None)
+
+ assert "Unsupported format" in str(exc.value)
diff --git a/tests/test_ini_handler.py b/tests/test_ini_handler.py
new file mode 100644
index 0000000..3bf1252
--- /dev/null
+++ b/tests/test_ini_handler.py
@@ -0,0 +1,93 @@
+from __future__ import annotations
+
+from pathlib import Path
+import configparser
+import pytest
+import yaml
+
+from jinjaturtle.core import (
+ parse_config,
+ flatten_config,
+ generate_ansible_yaml,
+ generate_jinja2_template,
+)
+from jinjaturtle.handlers.ini import IniHandler
+
+SAMPLES_DIR = Path(__file__).parent / "samples"
+
+
+def test_ini_php_sample_roundtrip():
+ ini_path = SAMPLES_DIR / "php.ini"
+ assert ini_path.is_file(), f"Missing sample INI file: {ini_path}"
+
+ fmt, parsed = parse_config(ini_path)
+ assert fmt == "ini"
+
+ flat_items = flatten_config(fmt, parsed)
+ assert flat_items, "Expected at least one flattened item from php.ini sample"
+
+ ansible_yaml = generate_ansible_yaml("php", flat_items)
+ defaults = yaml.safe_load(ansible_yaml)
+
+ # defaults should be a non-empty dict
+ assert isinstance(defaults, dict)
+ assert defaults, "Expected non-empty defaults for php.ini sample"
+
+ # all keys should be lowercase, start with prefix, and have no spaces
+ for key in defaults:
+ assert key.startswith("php_")
+ assert key == key.lower()
+ assert " " not in key
+
+ # template generation
+ original_text = ini_path.read_text(encoding="utf-8")
+ template = generate_jinja2_template(fmt, parsed, "php", original_text=original_text)
+ assert "; About this file" in template
+ assert isinstance(template, str)
+ assert template.strip(), "Template for php.ini sample should not be empty"
+
+ # each default variable name should appear in the template as a Jinja placeholder
+ for var_name in defaults:
+ assert (
+ var_name in template
+ ), f"Variable {var_name} not referenced in INI template"
+
+
+def test_generate_jinja2_template_fallback_ini():
+ """
+ When original_text is not provided, generate_jinja2_template should use the
+ structural fallback path for INI configs.
+ """
+ parser = configparser.ConfigParser()
+ # foo is quoted in the INI text to hit the "preserve quotes" branch
+ parser["section"] = {"foo": '"bar"', "num": "42"}
+
+ tmpl_ini = generate_jinja2_template("ini", parsed=parser, role_prefix="role")
+ assert "[section]" in tmpl_ini
+ assert "role_section_foo" in tmpl_ini
+ assert '"{{ role_section_foo }}"' in tmpl_ini # came from quoted INI value
+
+
+def test_generate_ini_template_from_text_edge_cases():
+ # Cover CRLF newlines, lines without '=', and lines with no key before '='.
+ text = "[section]\r\nkey=value\r\nnoequals\r\n = bare\r\n"
+ handler = IniHandler()
+ tmpl = handler._generate_ini_template_from_text("role", text)
+
+ # We don't care about exact formatting here, just that it runs and
+ # produces some reasonable output.
+ assert "[section]" in tmpl
+ assert "role_section_key" in tmpl
+ # The "noequals" line should be preserved as-is.
+ assert "noequals" in tmpl
+ # The " = bare" line has no key and should be left untouched.
+ assert " = bare" in tmpl
+
+
+def test_ini_handler_flatten_type_error():
+ """
+ Passing a non-ConfigParser into IniHandler.flatten should raise TypeError.
+ """
+ handler = IniHandler()
+ with pytest.raises(TypeError):
+ handler.flatten(parsed={"not": "a configparser"})
diff --git a/tests/test_json_handler.py b/tests/test_json_handler.py
new file mode 100644
index 0000000..dd502b1
--- /dev/null
+++ b/tests/test_json_handler.py
@@ -0,0 +1,63 @@
+from __future__ import annotations
+
+from pathlib import Path
+
+import pytest
+import yaml
+
+from jinjaturtle.core import (
+ parse_config,
+ flatten_config,
+ generate_ansible_yaml,
+ analyze_loops,
+ generate_jinja2_template,
+)
+from jinjaturtle.handlers.json import JsonHandler
+
+SAMPLES_DIR = Path(__file__).parent / "samples"
+
+
+def test_json_roundtrip():
+ json_path = SAMPLES_DIR / "foo.json"
+ assert json_path.is_file(), f"Missing sample JSON file: {json_path}"
+
+ fmt, parsed = parse_config(json_path)
+ assert fmt == "json"
+
+ # With loop detection
+ loop_candidates = analyze_loops(fmt, parsed)
+ flat_items = flatten_config(fmt, parsed, loop_candidates)
+ ansible_yaml = generate_ansible_yaml("foobar", flat_items, loop_candidates)
+ defaults = yaml.safe_load(ansible_yaml)
+
+ # Defaults: nested keys
+ assert defaults["foobar_foo"] == "bar"
+ assert defaults["foobar_nested_a"] == 1
+ # Booleans are now preserved as booleans (not stringified)
+ assert defaults["foobar_nested_b"] is True
+ # List should be a list (not flattened to scalars)
+ assert defaults["foobar_list"] == [10, 20]
+
+ # Template generation with loops
+ template = generate_jinja2_template("json", parsed, "foobar", None, loop_candidates)
+
+ # Template should use | tojson for type preservation
+ assert "{{ foobar_foo | tojson }}" in template
+ assert "{{ foobar_nested_a | tojson }}" in template
+ assert "{{ foobar_nested_b | tojson }}" in template
+
+ # List should use loop (not scalar indices)
+ assert "{% for" in template
+ assert "foobar_list" in template
+ # Should NOT have scalar indices
+ assert "foobar_list_0" not in template
+ assert "foobar_list_1" not in template
+
+
+def test_generate_jinja2_template_json_type_error():
+ """
+ Wrong type for JSON in JsonHandler.generate_jinja2_template should raise TypeError.
+ """
+ handler = JsonHandler()
+ with pytest.raises(TypeError):
+ handler.generate_jinja2_template(parsed="not a dict", role_prefix="role")
diff --git a/tests/test_roundtrip.py b/tests/test_roundtrip.py
new file mode 100644
index 0000000..5182e8c
--- /dev/null
+++ b/tests/test_roundtrip.py
@@ -0,0 +1,566 @@
+"""
+Roundtrip tests: Generate config → template/YAML → regenerate config → compare.
+
+These tests verify that:
+1. Generated Jinja2 template + Ansible YAML can reproduce the original config
+2. The regenerated config is semantically equivalent (allowing whitespace differences)
+3. No data loss occurs during the template generation process
+
+This is the ultimate validation - if the roundtrip works, the templates are correct.
+"""
+
+from __future__ import annotations
+
+import json
+import yaml
+from pathlib import Path
+from typing import Any
+from jinja2 import Environment, StrictUndefined
+
+import pytest
+
+from jinjaturtle.core import (
+ parse_config,
+ analyze_loops,
+ flatten_config,
+ generate_ansible_yaml,
+ generate_jinja2_template,
+)
+
+
+def render_template(template: str, variables: dict[str, Any]) -> str:
+ """Render a Jinja2 template with variables."""
+ env = Environment(undefined=StrictUndefined)
+ jinja_template = env.from_string(template)
+ return jinja_template.render(variables)
+
+
+class TestRoundtripJSON:
+ """Roundtrip tests for JSON files."""
+
+ def test_foo_json_roundtrip(self):
+ """Test foo.json can be perfectly regenerated from template."""
+ samples_dir = Path(__file__).parent / "samples"
+ json_file = samples_dir / "foo.json"
+
+ if not json_file.exists():
+ pytest.skip("foo.json not found")
+
+ # Read original
+ original_text = json_file.read_text()
+ original_data = json.loads(original_text)
+
+ # Generate template and YAML
+ fmt, parsed = parse_config(json_file)
+ loop_candidates = analyze_loops(fmt, parsed)
+ flat_items = flatten_config(fmt, parsed, loop_candidates)
+
+ ansible_yaml = generate_ansible_yaml("test", flat_items, loop_candidates)
+ template = generate_jinja2_template(fmt, parsed, "test", None, loop_candidates)
+
+ # Load variables from YAML
+ variables = yaml.safe_load(ansible_yaml)
+
+ # Render template
+ regenerated_text = render_template(template, variables)
+ regenerated_data = json.loads(regenerated_text)
+
+ # Compare data structures (should match exactly)
+ assert regenerated_data == original_data, (
+ f"Regenerated JSON differs from original\n"
+ f"Original: {json.dumps(original_data, indent=2, sort_keys=True)}\n"
+ f"Regenerated: {json.dumps(regenerated_data, indent=2, sort_keys=True)}"
+ )
+
+ def test_json_all_types_roundtrip(self):
+ """Test JSON with all data types roundtrips perfectly."""
+ json_text = """
+ {
+ "string": "value",
+ "number": 42,
+ "float": 3.14,
+ "boolean": true,
+ "false_val": false,
+ "null_value": null,
+ "array": [1, 2, 3],
+ "object": {
+ "nested": "data"
+ }
+ }
+ """
+
+ original_data = json.loads(json_text)
+
+ # Generate template and YAML
+ loop_candidates = analyze_loops("json", original_data)
+ flat_items = flatten_config("json", original_data, loop_candidates)
+
+ ansible_yaml = generate_ansible_yaml("test", flat_items, loop_candidates)
+ template = generate_jinja2_template(
+ "json", original_data, "test", None, loop_candidates
+ )
+
+ # Render template
+ variables = yaml.safe_load(ansible_yaml)
+ regenerated_text = render_template(template, variables)
+ regenerated_data = json.loads(regenerated_text)
+
+ # Should match exactly
+ assert regenerated_data == original_data
+
+
+class TestRoundtripYAML:
+ """Roundtrip tests for YAML files."""
+
+ def test_bar_yaml_roundtrip(self):
+ """Test bar.yaml can be regenerated from template."""
+ samples_dir = Path(__file__).parent / "samples"
+ yaml_file = samples_dir / "bar.yaml"
+
+ if not yaml_file.exists():
+ pytest.skip("bar.yaml not found")
+
+ # Read original
+ original_text = yaml_file.read_text()
+ original_data = yaml.safe_load(original_text)
+
+ # Generate template and YAML
+ fmt, parsed = parse_config(yaml_file)
+ loop_candidates = analyze_loops(fmt, parsed)
+ flat_items = flatten_config(fmt, parsed, loop_candidates)
+
+ ansible_yaml = generate_ansible_yaml("test", flat_items, loop_candidates)
+ template = generate_jinja2_template(
+ fmt, parsed, "test", original_text, loop_candidates
+ )
+
+ # Load variables from YAML
+ variables = yaml.safe_load(ansible_yaml)
+
+ # Render template
+ regenerated_text = render_template(template, variables)
+ regenerated_data = yaml.safe_load(regenerated_text)
+
+ # Compare data structures
+ assert regenerated_data == original_data, (
+ f"Regenerated YAML differs from original\n"
+ f"Original: {original_data}\n"
+ f"Regenerated: {regenerated_data}"
+ )
+
+ def test_yaml_with_lists_roundtrip(self):
+ """Test YAML with various list structures."""
+ yaml_text = """
+ name: myapp
+ simple_list:
+ - item1
+ - item2
+ - item3
+ list_of_dicts:
+ - name: first
+ value: 1
+ - name: second
+ value: 2
+ nested:
+ inner_list:
+ - a
+ - b
+ """
+
+ original_data = yaml.safe_load(yaml_text)
+
+ # Generate template and YAML
+ loop_candidates = analyze_loops("yaml", original_data)
+ flat_items = flatten_config("yaml", original_data, loop_candidates)
+
+ ansible_yaml = generate_ansible_yaml("test", flat_items, loop_candidates)
+ template = generate_jinja2_template(
+ "yaml", original_data, "test", yaml_text, loop_candidates
+ )
+
+ # Render template
+ variables = yaml.safe_load(ansible_yaml)
+ regenerated_text = render_template(template, variables)
+ regenerated_data = yaml.safe_load(regenerated_text)
+
+ # Compare
+ assert regenerated_data == original_data
+
+
+class TestRoundtripTOML:
+ """Roundtrip tests for TOML files."""
+
+ def test_tom_toml_roundtrip(self):
+ """Test tom.toml can be regenerated from template."""
+ samples_dir = Path(__file__).parent / "samples"
+ toml_file = samples_dir / "tom.toml"
+
+ if not toml_file.exists():
+ pytest.skip("tom.toml not found")
+
+ # Read original
+ original_text = toml_file.read_text()
+ import tomllib
+
+ original_data = tomllib.loads(original_text)
+
+ # Generate template and YAML
+ fmt, parsed = parse_config(toml_file)
+ loop_candidates = analyze_loops(fmt, parsed)
+ flat_items = flatten_config(fmt, parsed, loop_candidates)
+
+ ansible_yaml = generate_ansible_yaml("test", flat_items, loop_candidates)
+ template = generate_jinja2_template(
+ fmt, parsed, "test", original_text, loop_candidates
+ )
+
+ # Load variables from YAML
+ variables = yaml.safe_load(ansible_yaml)
+
+ # Render template
+ regenerated_text = render_template(template, variables)
+ regenerated_data = tomllib.loads(regenerated_text)
+
+ # Compare data structures
+ # Note: TOML datetime objects need special handling
+ assert _compare_toml_data(regenerated_data, original_data), (
+ f"Regenerated TOML differs from original\n"
+ f"Original: {original_data}\n"
+ f"Regenerated: {regenerated_data}"
+ )
+
+ def test_toml_with_arrays_roundtrip(self):
+ """Test TOML with inline arrays and array-of-tables."""
+ toml_text = """
+ name = "test"
+ ports = [8080, 8081, 8082]
+
+ [[database]]
+ host = "db1.example.com"
+ port = 5432
+
+ [[database]]
+ host = "db2.example.com"
+ port = 5433
+ """
+
+ import tomllib
+
+ original_data = tomllib.loads(toml_text)
+
+ # Generate template and YAML
+ loop_candidates = analyze_loops("toml", original_data)
+ flat_items = flatten_config("toml", original_data, loop_candidates)
+
+ ansible_yaml = generate_ansible_yaml("test", flat_items, loop_candidates)
+ template = generate_jinja2_template(
+ "toml", original_data, "test", toml_text, loop_candidates
+ )
+
+ # Render template
+ variables = yaml.safe_load(ansible_yaml)
+ regenerated_text = render_template(template, variables)
+ regenerated_data = tomllib.loads(regenerated_text)
+
+ # Compare
+ assert regenerated_data == original_data
+
+
class TestRoundtripXML:
    """Roundtrip tests for XML files: generate template + defaults, re-render,
    and compare the parsed trees structurally."""

    def test_xml_simple_roundtrip(self):
        """Test simple XML can be regenerated."""
        # NOTE(review): this literal appears to have had all of its markup
        # stripped at some point (no tags remain), so ET.fromstring below
        # cannot succeed as written -- restore the original document from
        # VCS history before relying on this test.
        xml_text = """

 test
 8080
 server1
 server2
 server3

"""

        import xml.etree.ElementTree as ET

        original_root = ET.fromstring(xml_text)

        # Generate template and YAML
        fmt = "xml"
        loop_candidates = analyze_loops(fmt, original_root)
        flat_items = flatten_config(fmt, original_root, loop_candidates)

        ansible_yaml = generate_ansible_yaml("test", flat_items, loop_candidates)
        template = generate_jinja2_template(
            fmt, original_root, "test", xml_text, loop_candidates
        )

        # Render template with the defaults generated above
        variables = yaml.safe_load(ansible_yaml)
        regenerated_text = render_template(template, variables)

        # Parse regenerated XML
        regenerated_root = ET.fromstring(regenerated_text)

        # Compare XML structures (ignore insignificant whitespace)
        assert _xml_elements_equal(
            original_root, regenerated_root, ignore_whitespace=True
        ), (
            f"Regenerated XML differs from original\n"
            f"Original: {ET.tostring(original_root, encoding='unicode')}\n"
            f"Regenerated: {ET.tostring(regenerated_root, encoding='unicode')}"
        )

    def test_ossec_xml_roundtrip(self):
        """Test ossec.xml (complex real-world XML) roundtrip."""
        samples_dir = Path(__file__).parent / "samples"
        xml_file = samples_dir / "ossec.xml"

        # Tolerate a missing sample rather than failing the whole suite.
        if not xml_file.exists():
            pytest.skip("ossec.xml not found")

        # Read original
        original_text = xml_file.read_text()
        import xml.etree.ElementTree as ET

        original_root = ET.fromstring(original_text)

        # Generate template and YAML
        fmt, parsed = parse_config(xml_file)
        loop_candidates = analyze_loops(fmt, parsed)
        flat_items = flatten_config(fmt, parsed, loop_candidates)

        ansible_yaml = generate_ansible_yaml("test", flat_items, loop_candidates)
        template = generate_jinja2_template(
            fmt, parsed, "test", original_text, loop_candidates
        )

        # Load variables and render
        variables = yaml.safe_load(ansible_yaml)
        regenerated_text = render_template(template, variables)

        # Parse regenerated
        regenerated_root = ET.fromstring(regenerated_text)

        # Compare - for complex XML, we compare structure not exact text
        assert _xml_elements_equal(
            original_root, regenerated_root, ignore_whitespace=True
        )
+
+
class TestRoundtripINI:
    """Roundtrip tests for INI files."""

    def test_ini_simple_roundtrip(self):
        """Generate defaults + template from a simple INI file and re-render it."""
        ini_text = """[section1]
key1 = value1
key2 = value2

[section2]
key3 = value3
"""

        from configparser import ConfigParser

        source_config = ConfigParser()
        source_config.read_string(ini_text)

        fmt = "ini"
        # Detect loops, flatten, and emit both artifacts.
        loop_candidates = analyze_loops(fmt, source_config)
        flat_items = flatten_config(fmt, source_config, loop_candidates)
        ansible_yaml = generate_ansible_yaml("test", flat_items, loop_candidates)
        template = generate_jinja2_template(
            fmt, source_config, "test", ini_text, loop_candidates
        )

        # Render the template with the generated defaults, then re-parse.
        rendered = render_template(template, yaml.safe_load(ansible_yaml))
        result_config = ConfigParser()
        result_config.read_string(rendered)

        # The regenerated INI must describe the exact same configuration.
        assert _ini_configs_equal(source_config, result_config)
+
+
class TestRoundtripEdgeCases:
    """Roundtrip tests for edge cases and special scenarios."""

    def _roundtrip_json(self, data):
        """Run the full generate/render cycle for JSON data and re-parse it."""
        loop_candidates = analyze_loops("json", data)
        flat_items = flatten_config("json", data, loop_candidates)
        ansible_yaml = generate_ansible_yaml("test", flat_items, loop_candidates)
        template = generate_jinja2_template(
            "json", data, "test", None, loop_candidates
        )
        rendered = render_template(template, yaml.safe_load(ansible_yaml))
        return json.loads(rendered)

    def test_empty_lists_roundtrip(self):
        """An empty list must survive the roundtrip unchanged."""
        original = json.loads('{"items": []}')
        assert self._roundtrip_json(original) == original

    def test_special_characters_roundtrip(self):
        """Quotes, backslashes, newlines and non-ASCII text must survive."""
        data = {
            "quote": 'He said "hello"',
            "backslash": "path\\to\\file",
            "newline": "line1\nline2",
            "unicode": "emoji: š",
        }
        assert self._roundtrip_json(data) == data

    def test_numeric_types_roundtrip(self):
        """Ints, floats, negatives, zero and large numbers keep their types."""
        data = {
            "int": 42,
            "float": 3.14159,
            "negative": -100,
            "zero": 0,
            "large": 9999999999,
        }
        assert self._roundtrip_json(data) == data

    def test_boolean_preservation_roundtrip(self):
        """Booleans must come back as real booleans, not strings."""
        yaml_text = """
enabled: true
disabled: false
"""
        original = yaml.safe_load(yaml_text)

        loop_candidates = analyze_loops("yaml", original)
        flat_items = flatten_config("yaml", original, loop_candidates)
        ansible_yaml = generate_ansible_yaml("test", flat_items, loop_candidates)
        template = generate_jinja2_template(
            "yaml", original, "test", yaml_text, loop_candidates
        )
        rendered = render_template(template, yaml.safe_load(ansible_yaml))
        result = yaml.safe_load(rendered)

        assert result["enabled"] is True
        assert result["disabled"] is False
+
+
+# Helper functions
+
+
+def _compare_toml_data(data1: Any, data2: Any) -> bool:
+ """Compare TOML data, handling datetime objects."""
+ import datetime
+
+ if type(data1) != type(data2):
+ return False
+
+ if isinstance(data1, dict):
+ if set(data1.keys()) != set(data2.keys()):
+ return False
+ return all(_compare_toml_data(data1[k], data2[k]) for k in data1.keys())
+
+ elif isinstance(data1, list):
+ if len(data1) != len(data2):
+ return False
+ return all(_compare_toml_data(v1, v2) for v1, v2 in zip(data1, data2))
+
+ elif isinstance(data1, datetime.datetime):
+ # Compare datetime objects
+ return data1 == data2
+
+ else:
+ return data1 == data2
+
+
+def _xml_elements_equal(elem1, elem2, ignore_whitespace: bool = False) -> bool:
+ """Compare two XML elements for equality."""
+ # Compare tags
+ if elem1.tag != elem2.tag:
+ return False
+
+ # Compare attributes
+ if elem1.attrib != elem2.attrib:
+ return False
+
+ # Compare text
+ text1 = (elem1.text or "").strip() if ignore_whitespace else (elem1.text or "")
+ text2 = (elem2.text or "").strip() if ignore_whitespace else (elem2.text or "")
+ if text1 != text2:
+ return False
+
+ # Compare tail
+ tail1 = (elem1.tail or "").strip() if ignore_whitespace else (elem1.tail or "")
+ tail2 = (elem2.tail or "").strip() if ignore_whitespace else (elem2.tail or "")
+ if tail1 != tail2:
+ return False
+
+ # Compare children
+ children1 = list(elem1)
+ children2 = list(elem2)
+
+ if len(children1) != len(children2):
+ return False
+
+ return all(
+ _xml_elements_equal(c1, c2, ignore_whitespace)
+ for c1, c2 in zip(children1, children2)
+ )
+
+
+def _ini_configs_equal(config1, config2) -> bool:
+ """Compare two ConfigParser objects for equality."""
+ if set(config1.sections()) != set(config2.sections()):
+ return False
+
+ for section in config1.sections():
+ if set(config1.options(section)) != set(config2.options(section)):
+ return False
+
+ for option in config1.options(section):
+ if config1.get(section, option) != config2.get(section, option):
+ return False
+
+ return True
+
+
# Allow running this test module directly (outside the pytest CLI).
if __name__ == "__main__":
    pytest.main([__file__, "-v"])
diff --git a/tests/test_toml_handler.py b/tests/test_toml_handler.py
new file mode 100644
index 0000000..a446536
--- /dev/null
+++ b/tests/test_toml_handler.py
@@ -0,0 +1,114 @@
+from __future__ import annotations
+
+from pathlib import Path
+
+import pytest
+import yaml
+
+from jinjaturtle.core import (
+ parse_config,
+ flatten_config,
+ generate_ansible_yaml,
+ generate_jinja2_template,
+)
+from jinjaturtle.handlers.toml import TomlHandler
+import jinjaturtle.handlers.toml as toml_module
+
+SAMPLES_DIR = Path(__file__).parent / "samples"
+
+
def test_toml_sample_roundtrip():
    """Full pipeline over the TOML sample: parse, flatten, defaults, template."""
    toml_path = SAMPLES_DIR / "tom.toml"
    assert toml_path.is_file(), f"Missing sample TOML file: {toml_path}"

    fmt, parsed = parse_config(toml_path)
    assert fmt == "toml"

    flat_items = flatten_config(fmt, parsed)
    assert flat_items

    defaults = yaml.safe_load(generate_ansible_yaml("jinjaturtle", flat_items))

    # Defaults must be a non-empty mapping of well-formed variable names.
    assert isinstance(defaults, dict)
    assert defaults, "Expected non-empty defaults for TOML sample"
    for key in defaults:
        assert key.startswith("jinjaturtle_")
        assert key == key.lower()
        assert " " not in key

    # Template generation with the original text supplied.
    original_text = toml_path.read_text(encoding="utf-8")
    template = generate_jinja2_template(
        fmt, parsed, "jinjaturtle", original_text=original_text
    )
    assert isinstance(template, str)
    assert template.strip()

    # Comments from the original file should be preserved.
    assert "# This is a TOML document" in template

    # Every default variable must show up in the template as a placeholder.
    for var_name in defaults:
        assert (
            var_name in template
        ), f"Variable {var_name} not referenced in TOML template"
+
+
def test_parse_config_toml_missing_tomllib(monkeypatch):
    """parse_config must raise RuntimeError when no TOML parser is available."""
    sample = SAMPLES_DIR / "tom.toml"

    # Simulate an interpreter that could import neither tomllib nor tomli.
    monkeypatch.setattr(toml_module, "tomllib", None)

    with pytest.raises(RuntimeError) as excinfo:
        parse_config(sample, fmt="toml")
    assert "tomllib/tomli is required" in str(excinfo.value)
+
+
def test_generate_jinja2_template_fallback_toml():
    """Without original_text, TOML template generation uses the structural path."""
    parsed_toml = {
        "title": "Example",
        "server": {"port": 8080, "host": "127.0.0.1"},
        # A nested table exercises the recursive walk.
        "logging": {"file": {"path": "/tmp/app.log"}},
    }

    rendered = generate_jinja2_template(
        "toml", parsed=parsed_toml, role_prefix="role"
    )

    assert "[server]" in rendered
    assert "role_server_port" in rendered
    assert "[logging]" in rendered or "[logging.file]" in rendered
+
+
def test_generate_toml_template_from_text_edge_cases():
    """CRLF newlines, '='-less lines, empty keys and inline tables (good + bad)."""
    text = (
        "# comment\r\n"
        "[table]\r\n"
        "noequals\r\n"
        " = 42\r\n"
        'inline_good = { name = "abc", value = 1 }\r\n'
        "inline_bad = { invalid = }\r\n"
    )

    tmpl = TomlHandler()._generate_toml_template_from_text("role", text)

    # A parseable inline table expands into one variable per field.
    assert "role_table_inline_good_name" in tmpl
    assert "role_table_inline_good_value" in tmpl
    # An unparseable inline table falls back to a single scalar variable.
    assert "role_table_inline_bad" in tmpl
    # Lines without '=' or with an empty key must pass through unharmed.
    assert "[table]" in tmpl
    assert "noequals" in tmpl
diff --git a/tests/test_xml_handler.py b/tests/test_xml_handler.py
new file mode 100644
index 0000000..6b124c4
--- /dev/null
+++ b/tests/test_xml_handler.py
@@ -0,0 +1,232 @@
+from __future__ import annotations
+
+from pathlib import Path
+import textwrap
+import xml.etree.ElementTree as ET
+
+import pytest
+import yaml
+
+from jinjaturtle.core import (
+ parse_config,
+ flatten_config,
+ generate_ansible_yaml,
+ generate_jinja2_template,
+)
+from jinjaturtle.handlers.xml import XmlHandler
+
+SAMPLES_DIR = Path(__file__).parent / "samples"
+
+
def test_xml_roundtrip_ossec_web_rules():
    """Full pipeline over the OSSEC sample XML: parse, flatten, defaults, template."""
    xml_path = SAMPLES_DIR / "ossec.xml"
    assert xml_path.is_file(), f"Missing sample XML file: {xml_path}"

    fmt, parsed = parse_config(xml_path)
    assert fmt == "xml"

    flat_items = flatten_config(fmt, parsed)
    assert flat_items, "Expected at least one flattened item from XML sample"

    defaults = yaml.safe_load(generate_ansible_yaml("ossec", flat_items))

    # Defaults must be a non-empty mapping of well-formed variable names.
    assert isinstance(defaults, dict)
    assert defaults, "Expected non-empty defaults for XML sample"
    for key in defaults:
        assert key.startswith("ossec_")
        assert key == key.lower()
        assert " " not in key

    # The root element's attribute flattens to ossec_name.
    assert defaults["ossec_name"] == "web,accesslog,"

    # Rule id="31100" must be represented somewhere in the defaults ...
    id_keys = [k for k, v in defaults.items() if v == "31100"]
    assert id_keys, "Expected to find a default for rule id 31100"

    # ... and at least once as the rule's *id* attribute.
    assert any(
        key.startswith("ossec_rule_") and key.endswith("_id") for key in id_keys
    ), f"Expected at least one *_id var for value 31100, got: {id_keys}"

    # Template generation (preserving comments).
    original_text = xml_path.read_text(encoding="utf-8")
    template = generate_jinja2_template(
        fmt, parsed, "ossec", original_text=original_text
    )
    assert isinstance(template, str)
    assert template.strip(), "Template for XML sample should not be empty"

    # Both top-of-file and mid-file comments must survive.
    assert "Official Web access rules for OSSEC." in template
    assert "Rules to ignore crawlers" in template

    # Every default variable must appear in the template as a placeholder.
    for var_name in defaults:
        assert (
            var_name in template
        ), f"Variable {var_name} not referenced in XML template"
+
+
def test_generate_xml_template_from_text_edge_cases():
    """
    Exercise XML text edge cases:
    - XML declaration and DOCTYPE in prolog
    - top-level and inner comments
    - repeated child elements (indexing)
    - attributes and text content
    """
    # NOTE(review): this literal appears to have had its markup stripped
    # somewhere along the way (no tags remain) -- restore the original
    # document from VCS history before relying on this test.
    text = textwrap.dedent(
        """\




        text
        other

        """
    )

    handler = XmlHandler()
    tmpl = handler._generate_xml_template_from_text("role", text)

    # Prolog and comments preserved
    # NOTE(review): the assertion below is syntactically broken (unterminated
    # string literal) -- it likewise needs to be restored from VCS history.
    assert " role_attr)
    assert "role_attr" in tmpl

    # Repeated elements should be indexed in both attr and text
    assert "role_child_0_attr" in tmpl
    assert "role_child_0" in tmpl
    assert "role_child_1" in tmpl
+
+
def test_generate_jinja2_template_xml_type_error():
    """XmlHandler.generate_jinja2_template rejects non-Element input."""
    with pytest.raises(TypeError):
        XmlHandler().generate_jinja2_template(
            parsed="not an element", role_prefix="role"
        )
+
+
def test_flatten_config_xml_type_error():
    """flatten_config rejects non-Element input for the XML format."""
    with pytest.raises(TypeError):
        flatten_config("xml", parsed="not-an-element")
+
+
def test_generate_jinja2_template_xml_structural_fallback():
    """
    When original_text is not provided for XML, generate_jinja2_template should
    use the structural fallback path (ET.tostring + handler processing).
    """
    # NOTE(review): the original literal was garbled (markup stripped); it has
    # been reconstructed from the assertions below -- the "2" and "text"
    # remnants matched a simple child and an attr+text node.  Confirm against
    # VCS history.
    xml_text = textwrap.dedent(
        """\
        <root attr="1">
          <child>2</child>
          <node attr="3">text</node>
        </root>
        """
    )
    root = ET.fromstring(xml_text)

    tmpl = generate_jinja2_template("xml", parsed=root, role_prefix="role")

    # Root attribute path ("@attr",) -> role_attr
    assert "role_attr" in tmpl

    # Simple child element text ("child",) -> role_child
    assert "role_child" in tmpl

    # Element with both attr and text:
    # - attr -> ("node", "@attr") -> role_node_attr
    # - text -> ("node", "value") -> role_node_value
    assert "role_node_attr" in tmpl
    assert "role_node_value" in tmpl
+
+
def test_split_xml_prolog_only_whitespace():
    """
    Whitespace-only input: prolog is the whitespace, body is empty.
    Exercises the 'if i >= n: break' path.
    """
    whitespace = " \n\t"
    prolog, body = XmlHandler()._split_xml_prolog(whitespace)
    assert prolog == whitespace
    assert body == ""
+
+
def test_split_xml_prolog_unterminated_declaration():
    """
    Unterminated XML declaration should hit the 'end == -1' branch and
    treat the whole string as body.
    """
    # NOTE(review): the original literal was garbled to an empty string, which
    # cannot exercise the unterminated-declaration branch; reconstructed as an
    # XML declaration opener with no closing '?>'.  Confirm against VCS
    # history.
    text = '<?xml version="1.0" encoding="UTF-8"'
    handler = XmlHandler()
    prolog, body = handler._split_xml_prolog(text)
    assert prolog == ""
    assert body == text
+
+
def test_flatten_xml_text_with_attributes_uses_value_suffix():
    """
    When an element has both attributes and text, _flatten_xml should store
    the text at path + ('value',), not just path.
    """
    # NOTE(review): the original literal was garbled (markup stripped);
    # reconstructed from the expected flatten paths below -- the paths start
    # at ("node", ...), so "node" is a child of the root element.  Confirm
    # against VCS history.
    xml_text = '<root><node attr="x">text</node></root>'
    root = ET.fromstring(xml_text)

    items = flatten_config("xml", root)

    # Attribute path: ("node", "@attr") -> "x"
    assert (("node", "@attr"), "x") in items

    # Text-with-attrs path: ("node", "value") -> "text"
    assert (("node", "value"), "text") in items
diff --git a/tests/test_yaml_handler.py b/tests/test_yaml_handler.py
new file mode 100644
index 0000000..c7bacb7
--- /dev/null
+++ b/tests/test_yaml_handler.py
@@ -0,0 +1,102 @@
+from __future__ import annotations
+
+from pathlib import Path
+import textwrap
+
+import yaml
+
+from jinjaturtle.core import (
+ parse_config,
+ flatten_config,
+ generate_ansible_yaml,
+ generate_jinja2_template,
+)
+from jinjaturtle.handlers.yaml import YamlHandler
+
+SAMPLES_DIR = Path(__file__).parent / "samples"
+
+
def test_yaml_roundtrip_with_list_and_comment():
    """Full pipeline over the YAML sample: flatten, defaults, template."""
    yaml_path = SAMPLES_DIR / "bar.yaml"
    assert yaml_path.is_file(), f"Missing sample YAML file: {yaml_path}"

    fmt, parsed = parse_config(yaml_path)
    assert fmt == "yaml"

    defaults = yaml.safe_load(
        generate_ansible_yaml("foobar", flatten_config(fmt, parsed))
    )

    # Flattened defaults carry list indices in their names.
    assert defaults["foobar_foo"] == "bar"
    assert defaults["foobar_blah_0"] == "something"
    assert defaults["foobar_blah_1"] == "else"

    # Template generation (preserving comments).
    original_text = yaml_path.read_text(encoding="utf-8")
    template = generate_jinja2_template(
        fmt, parsed, "foobar", original_text=original_text
    )

    # The original comment survives.
    assert "# Top comment" in template

    # Scalars are replaced by placeholders.
    assert "foo:" in template
    assert "foobar_foo" in template

    # List entries become indexed variables rather than a loop "item".
    assert "foobar_blah_0" in template
    assert "foobar_blah_1" in template
    assert "{{ foobar_blah }}" not in template
    assert "foobar_blah_item" not in template
+
+
def test_generate_yaml_template_from_text_edge_cases():
    """
    Exercise YAML text edge cases:
    - indentation dedent (stack pop)
    - empty key before ':'
    - quoted and unquoted list items
    """
    # NOTE(review): leading whitespace inside this literal was mangled at some
    # point; the indentation below is reconstructed from the assertions
    # ("root -> child" nesting, top-level "other", a key-less " : 3" line).
    # Confirm against VCS history.
    text = textwrap.dedent(
        """
        root:
          child: 1
        other: 2
         : 3
        list:
          - "quoted"
          - unquoted
        """
    )

    handler = YamlHandler()
    tmpl = handler._generate_yaml_template_from_text("role", text)

    # Dedenting from "root -> child" back to "other" pops the key stack.
    assert "role_root_child" in tmpl
    assert "role_other" in tmpl

    # The odd " : 3" line has no key and must be passed through untouched.
    assert " : 3" in tmpl

    # Each list item gets its own indexed variable; the first item is quoted
    # (use_quotes=True), the second is not.
    assert "role_list_0" in tmpl
    assert "role_list_1" in tmpl
+
+
def test_generate_jinja2_template_yaml_structural_fallback():
    """
    Without original_text, YAML template generation should fall back to the
    structural path (yaml.safe_dump + handler processing).
    """
    nested = {"outer": {"inner": "val"}}

    tmpl = generate_jinja2_template("yaml", parsed=nested, role_prefix="role")

    # Exact formatting is irrelevant; seeing the derived variable name proves
    # the structural path ran.
    assert "role_outer_inner" in tmpl
diff --git a/tests/test_yaml_template_consistency.py b/tests/test_yaml_template_consistency.py
new file mode 100644
index 0000000..69184dd
--- /dev/null
+++ b/tests/test_yaml_template_consistency.py
@@ -0,0 +1,558 @@
+"""
+Tests to ensure all Jinja2 template variables exist in the Ansible YAML.
+
+These tests catch the bug where templates reference variables that don't exist
+because the YAML has a list but the template uses scalar references (or vice versa).
+"""
+
+from __future__ import annotations
+
+import re
+from pathlib import Path
+from typing import Set
+import yaml
+import pytest
+
+from jinjaturtle.core import (
+ parse_config,
+ analyze_loops,
+ flatten_config,
+ generate_ansible_yaml,
+ generate_jinja2_template,
+)
+
+
def extract_jinja_variables(template: str) -> Set[str]:
    """
    Extract all Jinja2 variable names from a template that must exist in YAML.

    Handles ``{{ variable }}``, ``{{ variable.field }}`` and
    ``{% for item in collection %}``.  Loop variables (the ``item`` part) are
    defined by the template itself and are therefore excluded; loop
    collections are included because they must come from the YAML.
    """
    required: Set[str] = set()

    # Loop constructs: record the collection, remember the loop variable.
    loop_vars = set()
    for match in re.finditer(
        r"\{%\s*for\s+(\w+)\s+in\s+([a-zA-Z_][a-zA-Z0-9_]*)", template
    ):
        loop_vars.add(match.group(1))
        required.add(match.group(2))

    # Plain substitutions: take the base name before any dot or filter.
    for match in re.finditer(r"\{\{\s*([a-zA-Z_][a-zA-Z0-9_]*)", template):
        name = match.group(1)
        if name not in loop_vars:
            required.add(name)

    return required
+
+
def extract_yaml_variables(ansible_yaml: str) -> Set[str]:
    """
    Extract all variable names from Ansible YAML.

    Returns the top-level keys of the document, or an empty set when the
    document is not a mapping.
    """
    document = yaml.safe_load(ansible_yaml)
    return set(document.keys()) if isinstance(document, dict) else set()
+
+
class TestTemplateYamlConsistency:
    """Tests that verify template variables exist in YAML."""

    def test_simple_json_consistency(self):
        """Simple JSON with scalars and lists."""
        json_text = """
        {
            "name": "test",
            "values": [1, 2, 3]
        }
        """

        fmt = "json"
        import json

        parsed = json.loads(json_text)

        loop_candidates = analyze_loops(fmt, parsed)
        flat_items = flatten_config(fmt, parsed, loop_candidates)

        ansible_yaml = generate_ansible_yaml("app", flat_items, loop_candidates)
        template = generate_jinja2_template(fmt, parsed, "app", None, loop_candidates)

        yaml_vars = extract_yaml_variables(ansible_yaml)
        template_vars = extract_jinja_variables(template)

        # Every variable referenced in the template must exist in the YAML.
        missing_vars = template_vars - yaml_vars
        assert not missing_vars, (
            f"Template references variables not in YAML: {missing_vars}\n"
            f"YAML vars: {yaml_vars}\n"
            f"Template vars: {template_vars}\n"
            f"Template:\n{template}\n"
            f"YAML:\n{ansible_yaml}"
        )

    def test_toml_inline_array_consistency(self):
        """TOML with inline array should use loops consistently."""
        import tomllib

        toml_text = """
        name = "myapp"
        servers = ["server1", "server2", "server3"]
        """

        parsed = tomllib.loads(toml_text)
        loop_candidates = analyze_loops("toml", parsed)
        flat_items = flatten_config("toml", parsed, loop_candidates)

        ansible_yaml = generate_ansible_yaml("app", flat_items, loop_candidates)
        template = generate_jinja2_template(
            "toml", parsed, "app", toml_text, loop_candidates
        )

        yaml_vars = extract_yaml_variables(ansible_yaml)
        template_vars = extract_jinja_variables(template)

        missing_vars = template_vars - yaml_vars
        assert not missing_vars, (
            f"Template references variables not in YAML: {missing_vars}\n"
            f"Template:\n{template}\n"
            f"YAML:\n{ansible_yaml}"
        )

    def test_toml_array_of_tables_consistency(self):
        """TOML with [[array.of.tables]] should use loops consistently."""
        import tomllib

        toml_text = """
        [[database]]
        host = "db1.example.com"
        port = 5432

        [[database]]
        host = "db2.example.com"
        port = 5433
        """

        parsed = tomllib.loads(toml_text)
        loop_candidates = analyze_loops("toml", parsed)
        flat_items = flatten_config("toml", parsed, loop_candidates)

        ansible_yaml = generate_ansible_yaml("app", flat_items, loop_candidates)
        template = generate_jinja2_template(
            "toml", parsed, "app", toml_text, loop_candidates
        )

        yaml_vars = extract_yaml_variables(ansible_yaml)
        template_vars = extract_jinja_variables(template)

        missing_vars = template_vars - yaml_vars
        assert not missing_vars, (
            f"Template references variables not in YAML: {missing_vars}\n"
            f"Template:\n{template}\n"
            f"YAML:\n{ansible_yaml}"
        )

        # Additionally verify that if YAML has a list, template uses a loop.
        defaults = yaml.safe_load(ansible_yaml)
        for var_name, value in defaults.items():
            if isinstance(value, list) and len(value) > 1:
                # YAML has a list - template should use {% for %}
                assert "{% for" in template, (
                    f"YAML has list variable '{var_name}' but template doesn't use loops\n"
                    f"Template:\n{template}"
                )

    def test_yaml_list_consistency(self):
        """YAML with lists should use loops consistently."""
        yaml_text = """
        name: myapp
        servers:
          - server1
          - server2
          - server3
        databases:
          - host: db1
            port: 5432
          - host: db2
            port: 5433
        """

        parsed = yaml.safe_load(yaml_text)
        loop_candidates = analyze_loops("yaml", parsed)
        flat_items = flatten_config("yaml", parsed, loop_candidates)

        ansible_yaml = generate_ansible_yaml("app", flat_items, loop_candidates)
        template = generate_jinja2_template(
            "yaml", parsed, "app", yaml_text, loop_candidates
        )

        yaml_vars = extract_yaml_variables(ansible_yaml)
        template_vars = extract_jinja_variables(template)

        missing_vars = template_vars - yaml_vars
        assert not missing_vars, (
            f"Template references variables not in YAML: {missing_vars}\n"
            f"Template:\n{template}\n"
            f"YAML:\n{ansible_yaml}"
        )

    def test_mixed_scalars_and_loops_consistency(self):
        """Config with both scalars and loops should be consistent."""
        import tomllib

        toml_text = """
        name = "myapp"
        version = "1.0"
        ports = [8080, 8081, 8082]

        [database]
        host = "localhost"
        port = 5432

        [[servers]]
        name = "web1"
        ip = "10.0.0.1"

        [[servers]]
        name = "web2"
        ip = "10.0.0.2"
        """

        parsed = tomllib.loads(toml_text)
        loop_candidates = analyze_loops("toml", parsed)
        flat_items = flatten_config("toml", parsed, loop_candidates)

        ansible_yaml = generate_ansible_yaml("app", flat_items, loop_candidates)
        template = generate_jinja2_template(
            "toml", parsed, "app", toml_text, loop_candidates
        )

        yaml_vars = extract_yaml_variables(ansible_yaml)
        template_vars = extract_jinja_variables(template)

        missing_vars = template_vars - yaml_vars
        assert not missing_vars, (
            f"Template references variables not in YAML: {missing_vars}\n"
            f"Template:\n{template}\n"
            f"YAML:\n{ansible_yaml}"
        )

    def test_no_orphaned_scalar_references(self):
        """
        When YAML has a list variable, template must NOT reference scalar indices.

        This catches the bug where:
        - YAML has: app_list: [1, 2, 3]
        - Template incorrectly uses: {{ app_list_0 }}, {{ app_list_1 }}
        """
        import json

        json_text = '{"items": [1, 2, 3, 4, 5]}'
        parsed = json.loads(json_text)

        loop_candidates = analyze_loops("json", parsed)
        flat_items = flatten_config("json", parsed, loop_candidates)

        ansible_yaml = generate_ansible_yaml("app", flat_items, loop_candidates)
        template = generate_jinja2_template(
            "json", parsed, "app", None, loop_candidates
        )

        defaults = yaml.safe_load(ansible_yaml)

        # Check each list variable in YAML.
        for var_name, value in defaults.items():
            if isinstance(value, list):
                # Template should NOT reference app_items_0, app_items_1, etc.
                for i in range(len(value)):
                    scalar_ref = f"{var_name}_{i}"
                    assert scalar_ref not in template, (
                        f"Template incorrectly uses scalar reference '{scalar_ref}' "
                        f"when YAML has '{var_name}' as a list\n"
                        f"Template should use loops, not scalar indices\n"
                        f"Template:\n{template}"
                    )

    def test_all_sample_files_consistency(self):
        """Test all sample files for consistency."""
        samples_dir = Path(__file__).parent / "samples"

        sample_files = [
            ("foo.json", "json"),
            ("bar.yaml", "yaml"),
            ("tom.toml", "toml"),
        ]

        for filename, fmt in sample_files:
            file_path = samples_dir / filename
            if not file_path.exists():
                # BUG FIX: the skip message was a placeholder-less f-string;
                # name the missing file so the skip is actionable.
                pytest.skip(f"Sample file {filename} not found")

            original_text = file_path.read_text()
            fmt_detected, parsed = parse_config(file_path)
            # The detected format should match the expected one from the table.
            assert fmt_detected == fmt

            loop_candidates = analyze_loops(fmt_detected, parsed)
            flat_items = flatten_config(fmt_detected, parsed, loop_candidates)

            ansible_yaml = generate_ansible_yaml("test", flat_items, loop_candidates)
            template = generate_jinja2_template(
                fmt_detected, parsed, "test", original_text, loop_candidates
            )

            yaml_vars = extract_yaml_variables(ansible_yaml)
            template_vars = extract_jinja_variables(template)

            missing_vars = template_vars - yaml_vars
            # BUG FIX: the failure header lost its {filename} placeholder.
            assert not missing_vars, (
                f"File: {filename}\n"
                f"Template references variables not in YAML: {missing_vars}\n"
                f"YAML vars: {yaml_vars}\n"
                f"Template vars: {template_vars}\n"
                f"Template:\n{template}\n"
                f"YAML:\n{ansible_yaml}"
            )
+
+
class TestStructuralConsistency:
    """Tests that verify structural consistency between YAML and templates."""

    def test_list_in_yaml_means_loop_in_template(self):
        """When YAML has a list (len > 1), template should use {% for %}."""
        import json

        json_text = """
        {
            "scalar": "value",
            "list": [1, 2, 3]
        }
        """
        config = json.loads(json_text)
        loop_candidates = analyze_loops("json", config)
        flat_items = flatten_config("json", config, loop_candidates)

        ansible_yaml = generate_ansible_yaml("app", flat_items, loop_candidates)
        template = generate_jinja2_template(
            "json", config, "app", None, loop_candidates
        )

        defaults = yaml.safe_load(ansible_yaml)

        # Every multi-element list in the defaults must drive a for-loop.
        list_vars = [
            k for k, v in defaults.items() if isinstance(v, list) and len(v) > 1
        ]

        if list_vars:
            assert "{% for" in template, (
                f"YAML has list variables {list_vars} but template has no loops\n"
                f"Template:\n{template}"
            )

            # Each list variable must appear as the collection of some loop.
            for var_name in list_vars:
                loop_pattern = (
                    r"\{%\s*for\s+\w+\s+in\s+" + re.escape(var_name) + r"\s*%\}"
                )
                assert re.search(loop_pattern, template), (
                    f"List variable '{var_name}' not used in a for loop\n"
                    f"Template:\n{template}"
                )

    def test_scalar_in_yaml_means_no_loop_in_template(self):
        """When YAML has scalars, template should use {{ var }}, not loops."""
        import json

        json_text = """
        {
            "name": "test",
            "port": 8080,
            "enabled": true
        }
        """
        config = json.loads(json_text)
        loop_candidates = analyze_loops("json", config)
        flat_items = flatten_config("json", config, loop_candidates)

        ansible_yaml = generate_ansible_yaml("app", flat_items, loop_candidates)
        template = generate_jinja2_template(
            "json", config, "app", None, loop_candidates
        )

        defaults = yaml.safe_load(ansible_yaml)

        # Scalars must be referenced directly and never iterated.
        scalar_vars = [
            k for k, v in defaults.items() if not isinstance(v, (list, dict))
        ]

        for var_name in scalar_vars:
            direct_ref = f"{{{{ {var_name}"
            loop_ref = f"for .* in {var_name}"

            assert direct_ref in template, (
                f"Scalar variable '{var_name}' should be directly referenced\n"
                f"Template:\n{template}"
            )
            assert not re.search(loop_ref, template), (
                f"Scalar variable '{var_name}' incorrectly used in a loop\n"
                f"Template:\n{template}"
            )

    def test_no_undefined_variable_errors(self):
        """
        Simulate Ansible template rendering to catch undefined variables.

        The ultimate check: actually render the template with the generated
        YAML and verify Jinja2 raises no undefined-variable errors.
        """
        from jinja2 import Environment, StrictUndefined
        import json

        json_text = """
        {
            "name": "myapp",
            "servers": ["web1", "web2"],
            "database": {
                "host": "localhost",
                "port": 5432
            }
        }
        """
        config = json.loads(json_text)
        loop_candidates = analyze_loops("json", config)
        flat_items = flatten_config("json", config, loop_candidates)

        ansible_yaml = generate_ansible_yaml("app", flat_items, loop_candidates)
        template = generate_jinja2_template(
            "json", config, "app", None, loop_candidates
        )

        variables = yaml.safe_load(ansible_yaml)

        # StrictUndefined turns any missing variable into an exception.
        env = Environment(undefined=StrictUndefined)
        try:
            rendered = env.from_string(template).render(variables)
            # Successfully rendered - this is what we want!
            assert rendered, "Template rendered successfully"
        except Exception as e:
            pytest.fail(
                f"Template rendering failed with variables from YAML\n"
                f"Error: {e}\n"
                f"Template:\n{template}\n"
                f"Variables:\n{ansible_yaml}"
            )
+
+
class TestRegressionBugs:
    """Tests for specific bugs that were found and fixed."""

    def test_toml_array_of_tables_no_scalar_refs(self):
        """
        Regression test: TOML [[array]] should not generate scalar references.

        Bug: Template had {{ app_database_host }} when YAML had app_database as list.
        """
        import tomllib

        toml_text = """
        [[database]]
        host = "db1"
        port = 5432

        [[database]]
        host = "db2"
        port = 5433
        """

        config = tomllib.loads(toml_text)
        loop_candidates = analyze_loops("toml", config)
        flat_items = flatten_config("toml", config, loop_candidates)

        ansible_yaml = generate_ansible_yaml("app", flat_items, loop_candidates)
        template = generate_jinja2_template(
            "toml", config, "app", toml_text, loop_candidates
        )

        # The array of tables must surface as a single list variable ...
        defaults = yaml.safe_load(ansible_yaml)
        assert isinstance(
            defaults.get("app_database"), list
        ), f"Expected app_database to be a list in YAML\n{ansible_yaml}"

        # ... and the template must not flatten it into per-field scalars.
        assert (
            "app_database_host" not in template
        ), f"Template incorrectly uses scalar 'app_database_host'\n{template}"
        assert (
            "app_database_port" not in template
        ), f"Template incorrectly uses scalar 'app_database_port'\n{template}"

        # Instead it should iterate over the list.
        assert "{% for" in template, f"Template should use a loop\n{template}"
        assert (
            "app_database" in template
        ), f"Template should reference app_database\n{template}"

    def test_json_array_no_index_refs(self):
        """
        Regression test: JSON arrays should not generate index references.

        Bug: Template had {{ app_list_0 }}, {{ app_list_1 }} when YAML had app_list as list.
        """
        import json

        config = json.loads('{"items": [1, 2, 3]}')

        loop_candidates = analyze_loops("json", config)
        flat_items = flatten_config("json", config, loop_candidates)

        ansible_yaml = generate_ansible_yaml("app", flat_items, loop_candidates)
        template = generate_jinja2_template(
            "json", config, "app", None, loop_candidates
        )

        # YAML side: app_items must be a real list.
        assert isinstance(yaml.safe_load(ansible_yaml).get("app_items"), list)

        # Template side: no per-index scalars ...
        for i in range(3):
            assert (
                f"app_items_{i}" not in template
            ), f"Template incorrectly uses scalar 'app_items_{i}'\n{template}"

        # ... just one loop over app_items.
        assert "{% for" in template
        assert "app_items" in template
+
+
# Allow running this test module directly (outside the pytest CLI).
if __name__ == "__main__":
    pytest.main([__file__, "-v"])
diff --git a/utils/diff_configs.py b/utils/diff_configs.py
new file mode 100644
index 0000000..b35d6aa
--- /dev/null
+++ b/utils/diff_configs.py
@@ -0,0 +1,218 @@
+#!/usr/bin/env python3
+"""
+Side-by-side comparison of original vs regenerated config.
+
+Usage:
+ ./diff_configs.py tests/samples/foo.json
+ ./diff_configs.py tests/samples/tom.toml --context 5
+"""
+
+import argparse
+import sys
+from pathlib import Path
+import difflib
+import yaml
+from jinja2 import Environment, StrictUndefined
+
# Make the repository root importable so ``jinjaturtle`` resolves when this
# script is run directly from utils/.  (The previous code inserted the
# script's own directory — utils/ — which does not contain the package.)
sys.path.insert(0, str(Path(__file__).resolve().parent.parent))
+
+from jinjaturtle.core import (
+ parse_config,
+ analyze_loops,
+ flatten_config,
+ generate_ansible_yaml,
+ generate_jinja2_template,
+)
+
+
def colorize(text: str, color: str) -> str:
    """Wrap *text* in the ANSI escape sequence for *color*.

    ``cyan`` is included because unified_diff() and main() request it for
    hunk headers.  Unknown color names return *text* unchanged instead of
    emitting a stray reset sequence.
    """
    colors = {
        "red": "\033[91m",
        "green": "\033[92m",
        "yellow": "\033[93m",
        "blue": "\033[94m",
        "cyan": "\033[96m",
        "reset": "\033[0m",
    }
    code = colors.get(color)
    if code is None:
        return text
    return f"{code}{text}{colors['reset']}"
+
+
def side_by_side_diff(original: str, regenerated: str, width: int = 80):
    """Render the two texts as two colored columns, line by line.

    Differing lines are shown red (original) vs green (regenerated);
    identical lines are printed uncolored.  Long lines are truncated so
    the columns stay aligned within *width* characters.
    """
    left = original.splitlines()
    right = regenerated.splitlines()

    col_width = width // 2 - 3
    divider = " | "

    print(
        colorize("ORIGINAL".center(col_width), "blue")
        + divider
        + colorize("REGENERATED".center(col_width), "green")
    )
    print("-" * col_width + "-+-" + "-" * col_width)

    def clip(line: str) -> str:
        # Truncate with an ellipsis when the line would overflow its column.
        if len(line) > col_width - 2:
            return line[: col_width - 5] + "..."
        return line

    for idx in range(max(len(left), len(right))):
        lhs = clip(left[idx]) if idx < len(left) else ""
        rhs = clip(right[idx]) if idx < len(right) else ""

        if lhs != rhs:
            row = (
                colorize(lhs.ljust(col_width), "red")
                + divider
                + colorize(rhs.ljust(col_width), "green")
            )
        else:
            row = lhs.ljust(col_width) + divider + rhs.ljust(col_width)

        print(row)
+
+
def unified_diff(original: str, regenerated: str, filename: str, context: int = 3):
    """Print a colored unified diff between the two texts.

    Args:
        original: Text of the source config file.
        regenerated: Text produced by rendering the generated template.
        filename: Display name used in the ---/+++ header lines.
        context: Number of unchanged context lines around each hunk.
    """
    orig_lines = original.splitlines(keepends=True)
    regen_lines = regenerated.splitlines(keepends=True)

    diff = difflib.unified_diff(
        orig_lines,
        regen_lines,
        # Bug fix: *filename* was previously ignored and a literal
        # placeholder string was printed in the headers.
        fromfile=f"{filename} (original)",
        tofile=f"{filename} (regenerated)",
        n=context,
    )

    for line in diff:
        if line.startswith(("+++", "---")):
            print(colorize(line.rstrip(), "blue"))
        elif line.startswith("@@"):
            # NOTE(review): requires "cyan" in colorize()'s palette — verify.
            print(colorize(line.rstrip(), "cyan"))
        elif line.startswith("+"):
            print(colorize(line.rstrip(), "green"))
        elif line.startswith("-"):
            print(colorize(line.rstrip(), "red"))
        else:
            print(line.rstrip())
+
+
def main():
    """CLI entry point: regenerate the given config and show the differences.

    Parses the config, generates Ansible defaults plus a Jinja2 template,
    re-renders the template, and compares the result to the original both
    textually and (where a parser exists) semantically.

    Returns:
        Process exit code: 0 on success, 1 on error or missing file.
    """
    parser = argparse.ArgumentParser(
        description="Compare original config with regenerated version",
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )

    parser.add_argument("file", type=Path, help="Config file to check")

    parser.add_argument(
        "--mode",
        choices=["side-by-side", "unified", "both"],
        default="both",
        help="Comparison mode (default: both)",
    )

    parser.add_argument(
        "--context",
        type=int,
        default=3,
        help="Number of context lines for unified diff (default: 3)",
    )

    parser.add_argument(
        "--width",
        type=int,
        default=160,
        help="Terminal width for side-by-side (default: 160)",
    )

    args = parser.parse_args()

    if not args.file.exists():
        print(colorize(f"❌ File not found: {args.file}", "red"))
        return 1

    print(colorize(f"\n{'=' * 80}", "blue"))
    print(colorize(f" Comparing: {args.file}", "blue"))
    print(colorize(f"{'=' * 80}\n", "blue"))

    # Read and regenerate
    try:
        original_text = args.file.read_text()

        fmt, parsed = parse_config(args.file)
        loop_candidates = analyze_loops(fmt, parsed)
        flat_items = flatten_config(fmt, parsed, loop_candidates)

        ansible_yaml = generate_ansible_yaml("app", flat_items, loop_candidates)
        template = generate_jinja2_template(
            fmt, parsed, "app", original_text, loop_candidates
        )

        # StrictUndefined makes missing variables fail loudly during render.
        variables = yaml.safe_load(ansible_yaml)
        env = Environment(undefined=StrictUndefined)
        jinja_template = env.from_string(template)
        regenerated_text = jinja_template.render(variables)

        # Repaired: the success/failure messages below were mojibake-mangled
        # string literals split across physical lines (syntax errors).
        if original_text.strip() == regenerated_text.strip():
            print(colorize("✅ Files are IDENTICAL (text comparison)\n", "green"))
        else:
            # Show diff
            if args.mode in ("unified", "both"):
                print(colorize("\n--- UNIFIED DIFF ---\n", "yellow"))
                unified_diff(
                    original_text, regenerated_text, args.file.name, args.context
                )

            if args.mode in ("side-by-side", "both"):
                print(colorize("\n--- SIDE-BY-SIDE COMPARISON ---\n", "yellow"))
                side_by_side_diff(original_text, regenerated_text, args.width)

            # Whitespace can differ while the data is equivalent, so also
            # compare the parsed data structures.
            print(colorize(f"\n{'=' * 80}", "cyan"))
            print(colorize(" Semantic Comparison", "cyan"))
            print(colorize(f"{'=' * 80}", "cyan"))

            try:
                if fmt == "json":
                    import json

                    if json.loads(original_text) == json.loads(regenerated_text):
                        print(colorize("✅ JSON data structures are IDENTICAL", "green"))
                    else:
                        print(colorize("⚠️ JSON data structures DIFFER", "yellow"))
                elif fmt == "yaml":
                    if yaml.safe_load(original_text) == yaml.safe_load(regenerated_text):
                        print(colorize("✅ YAML data structures are IDENTICAL", "green"))
                    else:
                        print(colorize("⚠️ YAML data structures DIFFER", "yellow"))
                elif fmt == "toml":
                    try:
                        import tomllib  # stdlib on Python 3.11+
                    except Exception:
                        import tomli as tomllib  # third-party fallback for older Pythons
                    if tomllib.loads(original_text) == tomllib.loads(regenerated_text):
                        print(colorize("✅ TOML data structures are IDENTICAL", "green"))
                    else:
                        print(colorize("⚠️ TOML data structures DIFFER", "yellow"))
            except Exception as e:
                # Best-effort: semantic comparison is informational only.
                print(colorize(f"ℹ️ Could not compare semantically: {e}", "yellow"))

    except Exception as e:
        print(colorize(f"❌ ERROR: {e}", "red"))
        import traceback

        traceback.print_exc()
        return 1

    return 0
+
+
if __name__ == "__main__":
    # Exit with main()'s return code.
    raise SystemExit(main())
diff --git a/utils/regenerate.py b/utils/regenerate.py
new file mode 100644
index 0000000..f26bb32
--- /dev/null
+++ b/utils/regenerate.py
@@ -0,0 +1,162 @@
+#!/usr/bin/env python3
+"""
+Regenerate config files and save all intermediate files.
+
+Creates:
+ - original.{ext}
+ - defaults/main.yml
+ - templates/config.j2
+ - regenerated.{ext}
+
+Usage:
+ ./regenerate.py tests/samples/foo.json
+ ./regenerate.py tests/samples/tom.toml --output-dir tmp/toml_test
+"""
+
+import argparse
+import sys
+from pathlib import Path
+import yaml
+from jinja2 import Environment, StrictUndefined
+
# Make the repository root importable so ``jinjaturtle`` resolves when this
# script is run directly from utils/.  (The previous code inserted the
# script's own directory — utils/ — which does not contain the package.)
sys.path.insert(0, str(Path(__file__).resolve().parent.parent))
+
+from jinjaturtle.core import (
+ parse_config,
+ analyze_loops,
+ flatten_config,
+ generate_ansible_yaml,
+ generate_jinja2_template,
+)
+
+
def regenerate_and_save(config_file: Path, output_dir: Path, role_prefix: str = "app"):
    """Run the full pipeline on *config_file* and save every artifact.

    Writes into *output_dir*:
      - ``original.<ext>``       copy of the input file
      - ``defaults/main.yml``    generated Ansible defaults
      - ``templates/config.j2``  generated Jinja2 template
      - ``regenerated.<ext>``    template rendered with the defaults

    Args:
        config_file: Source config file (JSON/YAML/TOML, per parse_config).
        output_dir: Directory to create/populate with the artifacts.
        role_prefix: Prefix for the generated Ansible variable names.

    Returns:
        The output directory path.
    """
    output_dir.mkdir(parents=True, exist_ok=True)

    # Read original
    original_text = config_file.read_text()
    fmt, parsed = parse_config(config_file)

    # Keep the original extension so downstream tools detect the format.
    ext = config_file.suffix

    # Save original
    original_out = output_dir / f"original{ext}"
    original_out.write_text(original_text)
    print(f"📄 Saved: {original_out}")

    # Generate Ansible files
    loop_candidates = analyze_loops(fmt, parsed)
    flat_items = flatten_config(fmt, parsed, loop_candidates)

    ansible_yaml = generate_ansible_yaml(role_prefix, flat_items, loop_candidates)
    template = generate_jinja2_template(
        fmt, parsed, role_prefix, original_text, loop_candidates
    )

    # Save Ansible YAML
    defaults_dir = output_dir / "defaults"
    defaults_dir.mkdir(exist_ok=True)
    defaults_file = defaults_dir / "main.yml"
    defaults_file.write_text(ansible_yaml)
    print(f"📄 Saved: {defaults_file}")

    # Save template
    templates_dir = output_dir / "templates"
    templates_dir.mkdir(exist_ok=True)
    template_file = templates_dir / "config.j2"
    template_file.write_text(template)
    print(f"📄 Saved: {template_file}")

    # Render template; StrictUndefined makes missing variables fail loudly.
    variables = yaml.safe_load(ansible_yaml)
    env = Environment(undefined=StrictUndefined)
    jinja_template = env.from_string(template)
    regenerated_text = jinja_template.render(variables)

    # Save regenerated
    regenerated_out = output_dir / f"regenerated{ext}"
    regenerated_out.write_text(regenerated_text)
    print(f"📄 Saved: {regenerated_out}")

    # Summary (repaired: these messages were mojibake-mangled string
    # literals split across physical lines, i.e. syntax errors).
    print(f"\n✅ All files saved to: {output_dir}")
    print("\n📊 Statistics:")
    print(f"   Format: {fmt}")
    print(f"   Loop candidates: {len(loop_candidates)}")
    if loop_candidates:
        print("   Loops detected:")
        for c in loop_candidates:
            print(f"     - {'.'.join(c.path)}: {len(c.items)} items")

    # Check if identical
    if original_text.strip() == regenerated_text.strip():
        print("\n✅ Original and regenerated are IDENTICAL (text comparison)")
    else:
        print("\n⚠️ Original and regenerated differ in whitespace/formatting")
        print(f"   Run: diff {original_out} {regenerated_out}")

    return output_dir
+
+
def main():
    """CLI entry point: parse arguments and run regenerate_and_save().

    Returns:
        Process exit code: 0 on success, 1 on error or missing file.
    """
    parser = argparse.ArgumentParser(
        description="Regenerate config and save all intermediate files",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  %(prog)s tests/samples/foo.json
  %(prog)s tests/samples/tom.toml -o tmp/toml_output
  %(prog)s tests/samples/bar.yaml --role-prefix myapp
        """,
    )

    parser.add_argument("file", type=Path, help="Config file to process")

    parser.add_argument(
        "-o",
        "--output-dir",
        type=Path,
        # Repaired: the default's placeholder was lost to encoding mangling.
        help="Output directory (default: regenerated_<stem>)",
    )

    parser.add_argument(
        "-r",
        "--role-prefix",
        default="app",
        help="Ansible role prefix for variables (default: app)",
    )

    args = parser.parse_args()

    if not args.file.exists():
        print(f"❌ File not found: {args.file}")
        return 1

    # Determine output directory
    if args.output_dir:
        output_dir = args.output_dir
    else:
        output_dir = Path(f"regenerated_{args.file.stem}")

    print(f"🔄 Regenerating: {args.file}")
    print(f"📁 Output directory: {output_dir}")
    print(f"🏷️ Role prefix: {args.role_prefix}\n")

    try:
        regenerate_and_save(args.file, output_dir, args.role_prefix)
        return 0
    except Exception as e:
        # Top-level CLI boundary: report the error with a traceback and
        # return a nonzero exit code.
        print(f"\n❌ ERROR: {e}")
        import traceback

        traceback.print_exc()
        return 1
+
+
if __name__ == "__main__":
    # Exit with main()'s return code.
    raise SystemExit(main())