Compare commits
146 Commits
v1.5.0
...
70b06d2b3a
| Author | SHA1 | Date | |
|---|---|---|---|
| 70b06d2b3a | |||
| 00c668b595 | |||
| 12a38b7e6a | |||
| 37fd2192a5 | |||
| 607102e7f8 | |||
| 133cf63b9f | |||
| 6334936e8a | |||
| 946965f016 | |||
| 541a7f679f | |||
| 128f71745a | |||
| df2ce636c8 | |||
| 3b0dabf2a7 | |||
| 697370c906 | |||
| bc57172d92 | |||
| 0e7e23dce5 | |||
| 9d53f4c6f5 | |||
| a46d85b541 | |||
| acaea11eb6 | |||
| 056d21a859 | |||
| 612ba5069d | |||
| 551e245218 | |||
| 814523eac2 | |||
| 4f2c5013a7 | |||
| e01bb8c39a | |||
| 461a3c334d | |||
| e3de46c6a4 | |||
| b20882f492 | |||
| 430f21735e | |||
| acf1b69b70 | |||
| 7d574e67ec | |||
| aad6814fc5 | |||
| 411cd2df66 | |||
| 849d29c044 | |||
| 0947dea01e | |||
| 5d7e1fdbb3 | |||
| ac6981ad4d | |||
| f3a7b69bac | |||
| 5bcad7f5f3 | |||
| d39582d1da | |||
| 043d389a76 | |||
| cc1e543ebc | |||
| 25a0579809 | |||
| d4e461bb63 | |||
| 1864d0700e | |||
| a9bd8d202f | |||
| 28df54503e | |||
| aa489811e3 | |||
| f66af0157b | |||
| b0b3ccf5aa | |||
| e178afde31 | |||
| 9802293871 | |||
| a2138c9985 | |||
| 10998e50ad | |||
| a20814cb37 | |||
| feb5ba267f | |||
| 591be4ef35 | |||
| 3e6ef0fd68 | |||
| 3d5c770def | |||
| f4339a746a | |||
| 763f02a9a4 | |||
| 2eec873a17 | |||
| 17ee947930 | |||
| b989bdd4eb | |||
| c4da8368d8 | |||
| 997c265cfb | |||
| 955028288f | |||
| 866572e252 | |||
| b0a733369e | |||
| c5843ccd30 | |||
| 3cb7852cb4 | |||
| f995e3d368 | |||
| ffa9d9660a | |||
| be70dd4239 | |||
| 74876e2e15 | |||
| 54058c7f4d | |||
| 8583fdf172 | |||
| 374f4ed745 | |||
| 63e1b3d145 | |||
| 2f89de1ff5 | |||
| 019aa4b0d9 | |||
| 9c22c7dbb4 | |||
| f83e192e37 | |||
| 486863eb58 | |||
| bb23bd94f2 | |||
| 2a66c082eb | |||
| ee9d7758ed | |||
| 0119af330f | |||
| e117115b7f | |||
| 755b78fcb7 | |||
| 9485bc9e3f | |||
| dcda23435d | |||
| a69e81c44b | |||
| 2ca004d056 | |||
| f7bd5bfd0b | |||
| 2c15a4016b | |||
| 9e3ce34626 | |||
| 1a13fcaa4e | |||
| 48a0d1d458 | |||
| 783d2b921a | |||
| 6effacefef | |||
| 65903e740b | |||
| aa80a2ddb4 | |||
| 9456ad4475 | |||
| 3d7d7e9c09 | |||
| 328203ccd7 | |||
| ac16378807 | |||
| f7a86bc353 | |||
| 06a6a77a48 | |||
| 4883e40812 | |||
| 031ae5ac69 | |||
| 1c4fc531fa | |||
| 33dfbf3a4d | |||
| a3aa7b6394 | |||
| 724c262a4a | |||
| dcbe16c5f0 | |||
| f63b0a9f08 | |||
| 822c418503 | |||
| 562a6da291 | |||
| e61b30d9af | |||
| 27c0c7c01f | |||
| 0d652d995e | |||
| 0e03fbbee2 | |||
| 7cfd7e8d5c | |||
| 84b6c71748 | |||
| db9aaf920e | |||
| 69d28a461d | |||
| 03e414cc9f | |||
| 7674762c9a | |||
| a47de15e42 | |||
| 37f3057d31 | |||
| d55c8d3726 | |||
| 3990560cd7 | |||
| d1e5a71f77 | |||
| d59dc8ad53 | |||
| 55f4a1e941 | |||
| 2a4ec18532 | |||
| 2debdbee09 | |||
| 4cb62e90f8 | |||
| 923519497a | |||
| 5fa18cb449 | |||
| f513196911 | |||
| 7f06447bbd | |||
| 1e5d6d3eee | |||
| f2970adbb2 | |||
| 7f262c6557 | |||
| 0bc7a3ecc0 |
16
.claude/settings.json
Normal file
16
.claude/settings.json
Normal file
@@ -0,0 +1,16 @@
|
||||
{
|
||||
"permissions": {
|
||||
"allow": [
|
||||
"Bash(*)"
|
||||
],
|
||||
"ask": [
|
||||
"Skill(update-config)",
|
||||
"Skill(update-config:*)"
|
||||
]
|
||||
},
|
||||
"sandbox": {
|
||||
"enabled": true,
|
||||
"failIfUnavailable": true,
|
||||
"autoAllowBashIfSandboxed": true
|
||||
}
|
||||
}
|
||||
50
.github/workflows/ci.yml
vendored
50
.github/workflows/ci.yml
vendored
@@ -2,34 +2,72 @@ name: CI
|
||||
|
||||
on:
|
||||
push:
|
||||
branches-ignore:
|
||||
- main
|
||||
branches:
|
||||
- '**'
|
||||
pull_request:
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
concurrency:
|
||||
group: global-ci-${{ github.repository }}-${{ github.ref_name }}
|
||||
cancel-in-progress: false
|
||||
|
||||
jobs:
|
||||
security-codeql:
|
||||
permissions:
|
||||
contents: read
|
||||
packages: read
|
||||
security-events: write
|
||||
uses: ./.github/workflows/security-codeql.yml
|
||||
|
||||
test-unit:
|
||||
permissions:
|
||||
contents: read
|
||||
uses: ./.github/workflows/test-unit.yml
|
||||
|
||||
test-integration:
|
||||
permissions:
|
||||
contents: read
|
||||
uses: ./.github/workflows/test-integration.yml
|
||||
|
||||
test-env-virtual:
|
||||
permissions:
|
||||
contents: read
|
||||
uses: ./.github/workflows/test-env-virtual.yml
|
||||
|
||||
test-env-nix:
|
||||
permissions:
|
||||
contents: read
|
||||
uses: ./.github/workflows/test-env-nix.yml
|
||||
|
||||
test-e2e:
|
||||
permissions:
|
||||
contents: read
|
||||
uses: ./.github/workflows/test-e2e.yml
|
||||
|
||||
test-virgin-user:
|
||||
permissions:
|
||||
contents: read
|
||||
uses: ./.github/workflows/test-virgin-user.yml
|
||||
|
||||
test-virgin-root:
|
||||
permissions:
|
||||
contents: read
|
||||
uses: ./.github/workflows/test-virgin-root.yml
|
||||
|
||||
codesniffer-shellcheck:
|
||||
uses: ./.github/workflows/codesniffer-shellcheck.yml
|
||||
lint-shell:
|
||||
permissions:
|
||||
contents: read
|
||||
uses: ./.github/workflows/lint-shell.yml
|
||||
|
||||
codesniffer-ruff:
|
||||
uses: ./.github/workflows/codesniffer-ruff.yml
|
||||
lint-python:
|
||||
permissions:
|
||||
contents: read
|
||||
uses: ./.github/workflows/lint-python.yml
|
||||
|
||||
lint-docker:
|
||||
permissions:
|
||||
contents: read
|
||||
security-events: write
|
||||
uses: ./.github/workflows/lint-docker.yml
|
||||
|
||||
40
.github/workflows/lint-docker.yml
vendored
Normal file
40
.github/workflows/lint-docker.yml
vendored
Normal file
@@ -0,0 +1,40 @@
|
||||
name: Docker Linter
|
||||
|
||||
on:
|
||||
workflow_call:
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
lint-docker:
|
||||
name: Lint Dockerfile
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
contents: read
|
||||
security-events: write
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Run hadolint (produce SARIF)
|
||||
id: hadolint
|
||||
continue-on-error: true
|
||||
uses: hadolint/hadolint-action@2332a7b74a6de0dda2e2221d575162eba76ba5e5
|
||||
with:
|
||||
dockerfile: ./Dockerfile
|
||||
format: sarif
|
||||
output-file: hadolint-results.sarif
|
||||
failure-threshold: warning
|
||||
|
||||
- name: Upload analysis results to GitHub
|
||||
if: always()
|
||||
uses: github/codeql-action/upload-sarif@v4
|
||||
with:
|
||||
sarif_file: hadolint-results.sarif
|
||||
wait-for-processing: true
|
||||
category: hadolint
|
||||
|
||||
- name: Fail if SARIF contains warnings or errors
|
||||
if: always()
|
||||
run: python3 src/pkgmgr/github/check_hadolint_sarif.py hadolint-results.sarif
|
||||
@@ -3,8 +3,11 @@ name: Ruff (Python code sniffer)
|
||||
on:
|
||||
workflow_call:
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
codesniffer-ruff:
|
||||
lint-python:
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
@@ -3,8 +3,11 @@ name: ShellCheck
|
||||
on:
|
||||
workflow_call:
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
codesniffer-shellcheck:
|
||||
lint-shell:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
113
.github/workflows/mark-stable.yml
vendored
113
.github/workflows/mark-stable.yml
vendored
@@ -1,110 +1,39 @@
|
||||
name: Mark stable commit
|
||||
|
||||
concurrency:
|
||||
group: mark-stable-${{ github.repository }}-main
|
||||
cancel-in-progress: true
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- main # still run tests for main
|
||||
tags:
|
||||
- 'v*' # run tests for version tags (e.g. v0.9.1)
|
||||
|
||||
- 'v*'
|
||||
jobs:
|
||||
test-unit:
|
||||
uses: ./.github/workflows/test-unit.yml
|
||||
|
||||
test-integration:
|
||||
uses: ./.github/workflows/test-integration.yml
|
||||
|
||||
test-env-virtual:
|
||||
uses: ./.github/workflows/test-env-virtual.yml
|
||||
|
||||
test-env-nix:
|
||||
uses: ./.github/workflows/test-env-nix.yml
|
||||
|
||||
test-e2e:
|
||||
uses: ./.github/workflows/test-e2e.yml
|
||||
|
||||
test-virgin-user:
|
||||
uses: ./.github/workflows/test-virgin-user.yml
|
||||
|
||||
test-virgin-root:
|
||||
uses: ./.github/workflows/test-virgin-root.yml
|
||||
|
||||
codesniffer-shellcheck:
|
||||
uses: ./.github/workflows/codesniffer-shellcheck.yml
|
||||
|
||||
codesniffer-ruff:
|
||||
uses: ./.github/workflows/codesniffer-ruff.yml
|
||||
|
||||
mark-stable:
|
||||
needs:
|
||||
- codesniffer-shellcheck
|
||||
- codesniffer-ruff
|
||||
- test-unit
|
||||
- test-integration
|
||||
- test-env-nix
|
||||
- test-env-virtual
|
||||
- test-e2e
|
||||
- test-virgin-user
|
||||
- test-virgin-root
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
# Only run this job if the push is for a version tag (v*)
|
||||
if: startsWith(github.ref, 'refs/tags/v')
|
||||
timeout-minutes: 330
|
||||
|
||||
permissions:
|
||||
contents: write # Required to move/update the tag
|
||||
actions: read
|
||||
contents: write
|
||||
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
fetch-tags: true # We need all tags for version comparison
|
||||
fetch-tags: true # We need tags and main history for version comparison
|
||||
|
||||
- name: Check whether tagged commit is on main
|
||||
id: branch-check
|
||||
run: bash scripts/github/common/check-tagged-commit-on-main.sh
|
||||
|
||||
- name: Wait for CI success on main for this commit
|
||||
if: steps.branch-check.outputs.is_on_main == 'true'
|
||||
env:
|
||||
GH_TOKEN: ${{ github.token }}
|
||||
run: bash scripts/github/mark-stable/wait-for-main-ci-success.sh
|
||||
|
||||
- name: Move 'stable' tag only if this version is the highest
|
||||
run: |
|
||||
set -euo pipefail
|
||||
|
||||
git config user.name "github-actions[bot]"
|
||||
git config user.email "github-actions[bot]@users.noreply.github.com"
|
||||
|
||||
echo "Ref: $GITHUB_REF"
|
||||
echo "SHA: $GITHUB_SHA"
|
||||
|
||||
VERSION="${GITHUB_REF#refs/tags/}"
|
||||
echo "Current version tag: ${VERSION}"
|
||||
|
||||
echo "Collecting all version tags..."
|
||||
ALL_V_TAGS="$(git tag --list 'v*' || true)"
|
||||
|
||||
if [[ -z "${ALL_V_TAGS}" ]]; then
|
||||
echo "No version tags found. Skipping stable update."
|
||||
exit 0
|
||||
fi
|
||||
|
||||
echo "All version tags:"
|
||||
echo "${ALL_V_TAGS}"
|
||||
|
||||
# Determine highest version using natural version sorting
|
||||
LATEST_TAG="$(printf '%s\n' ${ALL_V_TAGS} | sort -V | tail -n1)"
|
||||
|
||||
echo "Highest version tag: ${LATEST_TAG}"
|
||||
|
||||
if [[ "${VERSION}" != "${LATEST_TAG}" ]]; then
|
||||
echo "Current version ${VERSION} is NOT the highest version."
|
||||
echo "Stable tag will NOT be updated."
|
||||
exit 0
|
||||
fi
|
||||
|
||||
echo "Current version ${VERSION} IS the highest version."
|
||||
echo "Updating 'stable' tag..."
|
||||
|
||||
# Delete existing stable tag (local + remote)
|
||||
git tag -d stable 2>/dev/null || true
|
||||
git push origin :refs/tags/stable || true
|
||||
|
||||
# Create new stable tag
|
||||
git tag stable "$GITHUB_SHA"
|
||||
git push origin stable
|
||||
|
||||
echo "✅ Stable tag updated to ${VERSION}."
|
||||
if: steps.branch-check.outputs.is_on_main == 'true'
|
||||
run: bash scripts/github/mark-stable/mark-stable-if-highest-version.sh
|
||||
|
||||
49
.github/workflows/publish-containers.yml
vendored
49
.github/workflows/publish-containers.yml
vendored
@@ -19,48 +19,41 @@ jobs:
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
fetch-tags: true
|
||||
|
||||
- name: Checkout workflow_run commit and refresh tags
|
||||
run: |
|
||||
set -euo pipefail
|
||||
git checkout -f "${{ github.event.workflow_run.head_sha }}"
|
||||
git fetch --tags --force
|
||||
git tag --list 'stable' 'v*' --sort=version:refname | tail -n 20
|
||||
env:
|
||||
WORKFLOW_RUN_SHA: ${{ github.event.workflow_run.head_sha }}
|
||||
run: bash scripts/github/publish-containers/checkout-workflow-run-commit.sh
|
||||
|
||||
- name: Check whether tagged commit is on main
|
||||
id: branch-check
|
||||
env:
|
||||
TARGET_SHA: ${{ github.event.workflow_run.head_sha }}
|
||||
run: bash scripts/github/common/check-tagged-commit-on-main.sh
|
||||
|
||||
- name: Compute version and stable flag
|
||||
id: info
|
||||
run: |
|
||||
set -euo pipefail
|
||||
SHA="$(git rev-parse HEAD)"
|
||||
|
||||
V_TAG="$(git tag --points-at "${SHA}" --list 'v*' | sort -V | tail -n1)"
|
||||
[[ -n "$V_TAG" ]] || { echo "No version tag found"; exit 1; }
|
||||
VERSION="${V_TAG#v}"
|
||||
|
||||
STABLE_SHA="$(git rev-parse -q --verify refs/tags/stable^{commit} 2>/dev/null || true)"
|
||||
IS_STABLE=false
|
||||
[[ -n "${STABLE_SHA}" && "${STABLE_SHA}" == "${SHA}" ]] && IS_STABLE=true
|
||||
|
||||
echo "version=${VERSION}" >> "$GITHUB_OUTPUT"
|
||||
echo "is_stable=${IS_STABLE}" >> "$GITHUB_OUTPUT"
|
||||
if: steps.branch-check.outputs.is_on_main == 'true'
|
||||
run: bash scripts/github/publish-containers/compute-publish-container-info.sh
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
if: ${{ steps.info.outputs.should_publish == 'true' }}
|
||||
uses: docker/setup-buildx-action@8d2750c68a42422c14e847fe6c8ac0403b4cbd6f
|
||||
with:
|
||||
use: true
|
||||
|
||||
- name: Login to GHCR
|
||||
uses: docker/login-action@v3
|
||||
if: ${{ steps.info.outputs.should_publish == 'true' }}
|
||||
uses: docker/login-action@c94ce9fb468520275223c153574b00df6fe4bcc9
|
||||
with:
|
||||
registry: ghcr.io
|
||||
username: ${{ github.actor }}
|
||||
password: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Publish all images
|
||||
run: |
|
||||
set -euo pipefail
|
||||
OWNER="${{ github.repository_owner }}" \
|
||||
VERSION="${{ steps.info.outputs.version }}" \
|
||||
IS_STABLE="${{ steps.info.outputs.is_stable }}" \
|
||||
bash scripts/build/publish.sh
|
||||
if: ${{ steps.info.outputs.should_publish == 'true' }}
|
||||
env:
|
||||
OWNER: ${{ github.repository_owner }}
|
||||
VERSION: ${{ steps.info.outputs.version }}
|
||||
IS_STABLE: ${{ steps.info.outputs.is_stable }}
|
||||
run: bash scripts/github/publish-containers/publish-container-images.sh
|
||||
|
||||
47
.github/workflows/security-codeql.yml
vendored
Normal file
47
.github/workflows/security-codeql.yml
vendored
Normal file
@@ -0,0 +1,47 @@
|
||||
name: CodeQL Advanced
|
||||
|
||||
on:
|
||||
workflow_call:
|
||||
|
||||
jobs:
|
||||
analyze:
|
||||
name: Check security
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
security-events: write
|
||||
packages: read
|
||||
contents: read
|
||||
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
include:
|
||||
- language: actions
|
||||
build-mode: none
|
||||
- language: python
|
||||
build-mode: none
|
||||
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Initialize CodeQL
|
||||
uses: github/codeql-action/init@v4
|
||||
with:
|
||||
languages: ${{ matrix.language }}
|
||||
build-mode: ${{ matrix.build-mode }}
|
||||
queries: security-extended,security-and-quality
|
||||
|
||||
- name: Run manual build steps
|
||||
if: matrix.build-mode == 'manual'
|
||||
shell: bash
|
||||
run: |
|
||||
echo 'If you are using a "manual" build mode for one or more of the' \
|
||||
'languages you are analyzing, replace this with the commands to build' \
|
||||
'your code.'
|
||||
exit 1
|
||||
|
||||
- name: Perform CodeQL Analysis
|
||||
uses: github/codeql-action/analyze@v4
|
||||
with:
|
||||
category: "/language:${{ matrix.language }}"
|
||||
7
.github/workflows/test-e2e.yml
vendored
7
.github/workflows/test-e2e.yml
vendored
@@ -3,6 +3,9 @@ name: Test End-To-End
|
||||
on:
|
||||
workflow_call:
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
test-e2e:
|
||||
runs-on: ubuntu-latest
|
||||
@@ -11,7 +14,9 @@ jobs:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
distro: [arch, debian, ubuntu, fedora, centos]
|
||||
|
||||
env:
|
||||
NIX_CONFIG: |
|
||||
access-tokens = github.com=${{ secrets.GITHUB_TOKEN }}
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
|
||||
7
.github/workflows/test-env-nix.yml
vendored
7
.github/workflows/test-env-nix.yml
vendored
@@ -3,6 +3,9 @@ name: Test Virgin Nix (flake only)
|
||||
on:
|
||||
workflow_call:
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
test-env-nix:
|
||||
runs-on: ubuntu-latest
|
||||
@@ -12,7 +15,9 @@ jobs:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
distro: [arch, debian, ubuntu, fedora, centos]
|
||||
|
||||
env:
|
||||
NIX_CONFIG: |
|
||||
access-tokens = github.com=${{ secrets.GITHUB_TOKEN }}
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
|
||||
7
.github/workflows/test-env-virtual.yml
vendored
7
.github/workflows/test-env-virtual.yml
vendored
@@ -3,6 +3,9 @@ name: Test OS Containers
|
||||
on:
|
||||
workflow_call:
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
test-env-virtual:
|
||||
runs-on: ubuntu-latest
|
||||
@@ -11,7 +14,9 @@ jobs:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
distro: [arch, debian, ubuntu, fedora, centos]
|
||||
|
||||
env:
|
||||
NIX_CONFIG: |
|
||||
access-tokens = github.com=${{ secrets.GITHUB_TOKEN }}
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
|
||||
7
.github/workflows/test-integration.yml
vendored
7
.github/workflows/test-integration.yml
vendored
@@ -3,11 +3,16 @@ name: Test Code Integration
|
||||
on:
|
||||
workflow_call:
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
test-integration:
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
|
||||
env:
|
||||
NIX_CONFIG: |
|
||||
access-tokens = github.com=${{ secrets.GITHUB_TOKEN }}
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
|
||||
7
.github/workflows/test-unit.yml
vendored
7
.github/workflows/test-unit.yml
vendored
@@ -3,11 +3,16 @@ name: Test Units
|
||||
on:
|
||||
workflow_call:
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
test-unit:
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
|
||||
env:
|
||||
NIX_CONFIG: |
|
||||
access-tokens = github.com=${{ secrets.GITHUB_TOKEN }}
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
|
||||
20
.github/workflows/test-virgin-root.yml
vendored
20
.github/workflows/test-virgin-root.yml
vendored
@@ -3,6 +3,9 @@ name: Test Virgin Root
|
||||
on:
|
||||
workflow_call:
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
test-virgin-root:
|
||||
runs-on: ubuntu-latest
|
||||
@@ -11,7 +14,9 @@ jobs:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
distro: [arch, debian, ubuntu, fedora, centos]
|
||||
|
||||
env:
|
||||
NIX_CONFIG: |
|
||||
access-tokens = github.com=${{ secrets.GITHUB_TOKEN }}
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
@@ -19,38 +24,35 @@ jobs:
|
||||
- name: Show Docker version
|
||||
run: docker version
|
||||
|
||||
# 🔹 BUILD virgin image if missing
|
||||
- name: Build virgin container (${{ matrix.distro }})
|
||||
run: |
|
||||
set -euo pipefail
|
||||
PKGMGR_DISTRO="${{ matrix.distro }}" make build-missing-virgin
|
||||
|
||||
# 🔹 RUN test inside virgin image
|
||||
- name: Virgin ${{ matrix.distro }} pkgmgr test (root)
|
||||
run: |
|
||||
set -euo pipefail
|
||||
|
||||
docker run --rm \
|
||||
-v "$PWD":/src \
|
||||
-v "$PWD":/opt/src/pkgmgr \
|
||||
-v pkgmgr_repos:/root/Repositories \
|
||||
-v pkgmgr_pip_cache:/root/.cache/pip \
|
||||
-w /src \
|
||||
-e NIX_CONFIG="${NIX_CONFIG}" \
|
||||
-w /opt/src/pkgmgr \
|
||||
"pkgmgr-${{ matrix.distro }}-virgin" \
|
||||
bash -lc '
|
||||
set -euo pipefail
|
||||
|
||||
git config --global --add safe.directory /src
|
||||
git config --global --add safe.directory /opt/src/pkgmgr
|
||||
|
||||
make install
|
||||
make setup
|
||||
|
||||
. "$HOME/.venvs/pkgmgr/bin/activate"
|
||||
|
||||
export NIX_CONFIG="experimental-features = nix-command flakes"
|
||||
|
||||
pkgmgr update pkgmgr --clone-mode shallow --no-verification
|
||||
pkgmgr version pkgmgr
|
||||
|
||||
echo ">>> Running Nix-based: nix run .#pkgmgr -- version pkgmgr"
|
||||
nix run /src#pkgmgr -- version pkgmgr
|
||||
nix run /opt/src/pkgmgr#pkgmgr -- version pkgmgr
|
||||
'
|
||||
|
||||
41
.github/workflows/test-virgin-user.yml
vendored
41
.github/workflows/test-virgin-user.yml
vendored
@@ -3,6 +3,9 @@ name: Test Virgin User
|
||||
on:
|
||||
workflow_call:
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
test-virgin-user:
|
||||
runs-on: ubuntu-latest
|
||||
@@ -11,7 +14,9 @@ jobs:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
distro: [arch, debian, ubuntu, fedora, centos]
|
||||
|
||||
env:
|
||||
NIX_CONFIG: |
|
||||
access-tokens = github.com=${{ secrets.GITHUB_TOKEN }}
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
@@ -19,20 +24,19 @@ jobs:
|
||||
- name: Show Docker version
|
||||
run: docker version
|
||||
|
||||
# 🔹 BUILD virgin image if missing
|
||||
- name: Build virgin container (${{ matrix.distro }})
|
||||
run: |
|
||||
set -euo pipefail
|
||||
PKGMGR_DISTRO="${{ matrix.distro }}" make build-missing-virgin
|
||||
|
||||
# 🔹 RUN test inside virgin image as non-root
|
||||
- name: Virgin ${{ matrix.distro }} pkgmgr test (user)
|
||||
run: |
|
||||
set -euo pipefail
|
||||
|
||||
docker run --rm \
|
||||
-v "$PWD":/src \
|
||||
-w /src \
|
||||
-v "$PWD":/opt/src/pkgmgr \
|
||||
-e NIX_CONFIG="${NIX_CONFIG}" \
|
||||
-w /opt/src/pkgmgr \
|
||||
"pkgmgr-${{ matrix.distro }}-virgin" \
|
||||
bash -lc '
|
||||
set -euo pipefail
|
||||
@@ -42,24 +46,25 @@ jobs:
|
||||
useradd -m dev
|
||||
echo "dev ALL=(ALL) NOPASSWD: ALL" > /etc/sudoers.d/dev
|
||||
chmod 0440 /etc/sudoers.d/dev
|
||||
chown -R dev:dev /src
|
||||
chown -R dev:dev /opt/src/pkgmgr
|
||||
|
||||
mkdir -p /nix/store /nix/var/nix /nix/var/log/nix /nix/var/nix/profiles
|
||||
chown -R dev:dev /nix
|
||||
chmod 0755 /nix
|
||||
chmod 1777 /nix/store
|
||||
sudo -H -u dev env \
|
||||
HOME=/home/dev \
|
||||
NIX_CONFIG="$NIX_CONFIG" \
|
||||
PKGMGR_DISABLE_NIX_FLAKE_INSTALLER=1 \
|
||||
bash -lc "
|
||||
set -euo pipefail
|
||||
cd /opt/src/pkgmgr
|
||||
make setup-venv
|
||||
. \"\$HOME/.venvs/pkgmgr/bin/activate\"
|
||||
|
||||
sudo -H -u dev env HOME=/home/dev PKGMGR_DISABLE_NIX_FLAKE_INSTALLER=1 bash -lc "
|
||||
set -euo pipefail
|
||||
cd /src
|
||||
pkgmgr version pkgmgr
|
||||
|
||||
make setup-venv
|
||||
. \"\$HOME/.venvs/pkgmgr/bin/activate\"
|
||||
|
||||
pkgmgr version pkgmgr
|
||||
|
||||
export NIX_REMOTE=local
|
||||
export NIX_CONFIG=\"experimental-features = nix-command flakes\"
|
||||
nix run /src#pkgmgr -- version pkgmgr
|
||||
"
|
||||
export NIX_REMOTE=local
|
||||
nix run /opt/src/pkgmgr#pkgmgr -- version pkgmgr
|
||||
"
|
||||
'
|
||||
|
||||
3
.gitignore
vendored
3
.gitignore
vendored
@@ -24,10 +24,9 @@ package-manager-*
|
||||
.DS_Store
|
||||
Thumbs.db
|
||||
|
||||
# Nix Cache to speed up tests
|
||||
# Nix cache to speed up tests
|
||||
.nix/
|
||||
.nix-dev-installed
|
||||
flake.lock
|
||||
|
||||
# Ignore logs
|
||||
*.log
|
||||
|
||||
228
CHANGELOG.md
228
CHANGELOG.md
@@ -1,3 +1,231 @@
|
||||
## [1.13.3] - 2026-03-26
|
||||
|
||||
* CI pipelines now include automated security scanning (CodeQL, Docker lint), increasing detection of vulnerabilities and misconfigurations
|
||||
* Workflow permissions were tightened and fixed, ensuring secure and reliable execution of reusable workflows
|
||||
* Publishing and “stable” tagging are now restricted to the `main` branch, preventing accidental releases from other branches
|
||||
* Stale CI runs are automatically cancelled, reducing wasted resources and speeding up feedback cycles
|
||||
* Overall CI reliability and security posture improved, with fewer false positives and more consistent pipeline results
|
||||
|
||||
|
||||
## [1.13.2] - 2026-03-26
|
||||
|
||||
* Fail fast with a clear error when the Nix bootstrap or nix binary is unavailable instead of continuing with a broken startup path.
|
||||
|
||||
|
||||
## [1.13.1] - 2026-03-20
|
||||
|
||||
* Fixed misleading GPG verification failures by adding explicit git and gnupg runtime dependencies and surfacing signing-key lookup errors accurately.
|
||||
|
||||
|
||||
## [1.13.0] - 2026-03-20
|
||||
|
||||
* Set CentOS docker image to latest
|
||||
|
||||
|
||||
## [1.12.5] - 2026-02-24
|
||||
|
||||
* The stable-tag workflow now waits up to two hours for a successful main-branch CI run on the same commit before updating stable.
|
||||
|
||||
|
||||
## [1.12.4] - 2026-02-24
|
||||
|
||||
* The release pipeline now updates the stable tag only for v* tags after a successful CI run on main for the same commit, while avoiding duplicate test executions.
|
||||
|
||||
|
||||
## [1.12.3] - 2026-02-24
|
||||
|
||||
* Stabilized Nix-based builds by switching to nixos-25.11 and committing flake.lock, ensuring reproducible pkgmgr test/runtime environments (with pip) and avoiding transient sphinx/Python 3.11 breakage.
|
||||
|
||||
|
||||
## [1.12.2] - 2026-02-24
|
||||
|
||||
* Removed infinito-sphinx package
|
||||
|
||||
|
||||
## [1.12.1] - 2026-02-14
|
||||
|
||||
* pkgmgr now prefers distro-managed nix binaries on Arch before profile/PATH resolution, preventing libllhttp mismatch failures after pacman system upgrades.
|
||||
|
||||
|
||||
## [1.12.0] - 2026-02-08
|
||||
|
||||
* Adds explicit concurrency groups to the CI and mark-stable workflows to prevent overlapping runs on the same branch and make pipeline execution more predictable.
|
||||
|
||||
|
||||
## [1.11.2] - 2026-02-08
|
||||
|
||||
* Removes the v* tag trigger from the mark-stable workflow so it runs only on branch pushes and avoids duplicate executions during releases.
|
||||
|
||||
|
||||
## [1.11.1] - 2026-02-08
|
||||
|
||||
* Implements pushing the branch and the version tag together in a single command so the CI release workflow can reliably detect the version tag on HEAD.
|
||||
|
||||
|
||||
## [1.11.0] - 2026-01-21
|
||||
|
||||
* Adds a dedicated slim Docker image for pkgmgr and publishes slim variants for all supported distros.
|
||||
|
||||
|
||||
## [1.10.0] - 2026-01-20
|
||||
|
||||
* Introduce safe verbose image cleanup to reduce Docker image size and build artifacts
|
||||
|
||||
## [1.9.5] - 2026-01-16
|
||||
|
||||
* Release patch: improve git pull error diagnostics
|
||||
|
||||
|
||||
## [1.9.4] - 2026-01-13
|
||||
|
||||
* fix(ci): replace sudo with su for user switching to avoid PAM failures in minimal container images
|
||||
|
||||
|
||||
## [1.9.3] - 2026-01-07
|
||||
|
||||
* Made the Nix dependency optional on non-x86_64 architectures to avoid broken Arch Linux ARM repository packages.
|
||||
|
||||
|
||||
## [1.9.2] - 2025-12-21
|
||||
|
||||
* Default configuration files are now packaged and loaded correctly when no user config exists, while fully preserving custom user configurations.
|
||||
|
||||
|
||||
## [1.9.1] - 2025-12-21
|
||||
|
||||
* Fixed installation issues and improved loading of default configuration files.
|
||||
|
||||
|
||||
## [1.9.0] - 2025-12-20
|
||||
|
||||
* * New ***mirror visibility*** command to set remote Git repositories to ***public*** or ***private***.
|
||||
* New ***--public*** flag for ***mirror provision*** to create repositories and immediately make them public.
|
||||
* All configured git mirrors are now provisioned.
|
||||
|
||||
|
||||
## [1.8.7] - 2025-12-19
|
||||
|
||||
* * **Release version updates now correctly modify ***pyproject.toml*** files that follow PEP 621**, ensuring the ***[project].version*** field is updated as expected.
|
||||
* **Invalid or incomplete ***pyproject.toml*** files are now handled gracefully** with clear error messages instead of abrupt process termination.
|
||||
* **RPM spec files remain compatible during releases**: existing macros such as ***%{?dist}*** are preserved and no longer accidentally modified.
|
||||
|
||||
|
||||
## [1.8.6] - 2025-12-17
|
||||
|
||||
* Prevent Rate Limits during GitHub Nix Setups
|
||||
|
||||
|
||||
## [1.8.5] - 2025-12-17
|
||||
|
||||
* * Clearer Git error handling, especially when a directory is not a Git repository.
|
||||
* More reliable repository verification with improved commit and GPG signature checks.
|
||||
* Better error messages and overall robustness when working with Git-based workflows.
|
||||
|
||||
|
||||
## [1.9.0] - 2025-12-17
|
||||
|
||||
* Automated release.
|
||||
|
||||
|
||||
## [1.8.4] - 2025-12-17
|
||||
|
||||
* * Made pkgmgr’s base-layer role explicit by standardizing the Docker/CI mount path to *`/opt/src/pkgmgr`*.
|
||||
|
||||
|
||||
## [1.8.3] - 2025-12-16
|
||||
|
||||
* MIRRORS now supports plain URL entries, ensuring metadata-only sources like PyPI are recorded without ever being added to the Git configuration.
|
||||
|
||||
|
||||
## [1.8.2] - 2025-12-16
|
||||
|
||||
* * ***pkgmgr tools code*** is more robust and predictable: it now fails early with clear errors if VS Code is not installed or a repository is not yet identified.
|
||||
|
||||
|
||||
## [1.8.1] - 2025-12-16
|
||||
|
||||
* * Improved stability and consistency of all Git operations (clone, pull, push, release, branch handling) with clearer error messages and predictable preview behavior.
|
||||
* Mirrors are now handled cleanly: only valid Git remotes are used for Git operations, while non-Git URLs (e.g. PyPI) are excluded, preventing broken or confusing repository configs.
|
||||
* GitHub authentication is more robust: tokens are automatically resolved via the GitHub CLI (`gh`), invalid stored tokens are replaced, and interactive prompts occur only when necessary.
|
||||
* Repository creation and release workflows are more reliable, producing cleaner Git configurations and more predictable version handling.
|
||||
|
||||
|
||||
## [1.8.0] - 2025-12-15
|
||||
|
||||
* *** New Features: ***
|
||||
- **Silent Updates**: You can now use the `--silent` flag during installs and updates to suppress error messages for individual repositories and get a single summary at the end. This ensures the process continues even if some repositories fail, while still preserving interactive checks when not in silent mode.
|
||||
- **Repository Scaffolding**: The process for creating new repositories has been improved. You can now use templates to scaffold repositories with a preview and automatic mirror setup.
|
||||
|
||||
*** Bug Fixes: ***
|
||||
- **Pip Installation**: Pip is now installed automatically on all supported systems. This includes `python-pip` for Arch and `python3-pip` for CentOS, Debian, Fedora, and Ubuntu, ensuring that pip is available for Python package installations.
|
||||
- **Pacman Keyring**: Fixed an issue on Arch Linux where package installation would fail due to missing keys. The pacman keyring is now properly initialized before installing packages.
|
||||
|
||||
|
||||
## [1.7.2] - 2025-12-15
|
||||
|
||||
* * Git mirrors are now resolved consistently (origin → MIRRORS file → config → default).
|
||||
* The `origin` remote is always enforced to use the primary URL for both fetch and push.
|
||||
* Additional mirrors are added as extra push targets without duplication.
|
||||
* Local and remote mirror setup behaves more predictably and consistently.
|
||||
* Improved test coverage ensures stable origin and push URL handling.
|
||||
|
||||
|
||||
## [1.7.1] - 2025-12-14
|
||||
|
||||
* Patched package-manager to kpmx to publish on pypi
|
||||
|
||||
|
||||
## [1.7.0] - 2025-12-14
|
||||
|
||||
* * New *pkgmgr publish* command to publish repository artifacts to PyPI based on the *MIRRORS* file.
|
||||
* Automatically selects the current repository when no explicit selection is given.
|
||||
* Publishes only when a semantic version tag is present on *HEAD*; otherwise skips with a clear info message.
|
||||
* Supports non-interactive mode for CI environments via *--non-interactive*.
|
||||
|
||||
|
||||
## [1.6.4] - 2025-12-14
|
||||
|
||||
* * Improved reliability of Nix installs and updates, including automatic resolution of profile conflicts and better handling of GitHub 403 rate limits.
|
||||
* More stable launcher behavior in packaged and virtual-env setups.
|
||||
* Enhanced mirror and remote handling: repository owner/name are derived from URLs, with smoother provisioning and clearer credential handling.
|
||||
* More reliable releases and artifacts due to safer CI behavior when no version tag is present.
|
||||
|
||||
|
||||
## [1.6.3] - 2025-12-14
|
||||
|
||||
* ***Fixed:*** Corrected repository path resolution so release and version logic consistently use the canonical packaging/* layout, preventing changelog and packaging files from being read or updated from incorrect locations.
|
||||
|
||||
|
||||
## [1.6.2] - 2025-12-14
|
||||
|
||||
* **pkgmgr version** now also shows the installed pkgmgr version when run outside a repository.
|
||||
|
||||
|
||||
## [1.6.1] - 2025-12-14
|
||||
|
||||
* * Added automatic retry handling for GitHub 403 / rate-limit errors during Nix flake installs (Fibonacci backoff with jitter).
|
||||
|
||||
|
||||
## [1.6.0] - 2025-12-14
|
||||
|
||||
* *** Changed ***
|
||||
- Unified update handling via a single top-level `pkgmgr update` command, removing ambiguous update paths.
|
||||
- Improved update reliability by routing all update logic through a central UpdateManager.
|
||||
- Renamed system update flag from `--system-update` to `--system` for clarity and consistency.
|
||||
- Made mirror handling explicit and safer by separating setup, check, and provision responsibilities.
|
||||
- Improved credential resolution for remote providers (environment → keyring → interactive).
|
||||
|
||||
*** Added ***
|
||||
- Optional system updates via `pkgmgr update --system` (Arch, Debian/Ubuntu, Fedora/RHEL).
|
||||
- `pkgmgr install --update` to force re-running installers and refresh existing installations.
|
||||
- Remote repository provisioning for mirrors on supported providers.
|
||||
- Extended end-to-end test coverage for update and mirror workflows.
|
||||
|
||||
*** Fixed ***
|
||||
- Resolved “Unknown repos command: update” errors after CLI refactoring.
|
||||
- Improved Nix update stability and reduced CI failures caused by transient rate limits.
|
||||
|
||||
|
||||
## [1.5.0] - 2025-12-13
|
||||
|
||||
* - Commands now show live output while running, making long operations easier to follow
|
||||
|
||||
21
Dockerfile
21
Dockerfile
@@ -33,26 +33,35 @@ CMD ["bash"]
|
||||
# - inherits from virgin
|
||||
# - builds + installs pkgmgr
|
||||
# - sets entrypoint + default cmd
|
||||
# - NOTE: does NOT run slim.sh (that is done in slim stage)
|
||||
# ============================================================
|
||||
FROM virgin AS full
|
||||
|
||||
# Nix environment defaults (only config; nix itself comes from deps/install flow)
|
||||
ENV NIX_CONFIG="experimental-features = nix-command flakes"
|
||||
|
||||
WORKDIR /build
|
||||
|
||||
# Copy full repository for build
|
||||
COPY . .
|
||||
|
||||
# Build and install distro-native package-manager package
|
||||
RUN set -euo pipefail; \
|
||||
RUN set -eu; \
|
||||
echo "Building and installing package-manager via make install..."; \
|
||||
make install; \
|
||||
cd /; rm -rf /build
|
||||
rm -rf /build
|
||||
|
||||
# Entry point
|
||||
COPY scripts/docker/entry.sh /usr/local/bin/docker-entry.sh
|
||||
|
||||
WORKDIR /src
|
||||
WORKDIR /opt/src/pkgmgr
|
||||
ENTRYPOINT ["/usr/local/bin/docker-entry.sh"]
|
||||
CMD ["pkgmgr", "--help"]
|
||||
|
||||
|
||||
# ============================================================
|
||||
# Target: slim
|
||||
# - based on full
|
||||
# - runs slim.sh
|
||||
# ============================================================
|
||||
FROM full AS slim
|
||||
|
||||
COPY scripts/docker/slim.sh /usr/local/bin/slim.sh
|
||||
RUN chmod +x /usr/local/bin/slim.sh && /usr/local/bin/slim.sh
|
||||
|
||||
3
MIRRORS
3
MIRRORS
@@ -1,3 +1,4 @@
|
||||
git@github.com:kevinveenbirkenbach/package-manager.git
|
||||
ssh://git@git.veen.world:2201/kevinveenbirkenbach/pkgmgr.git
|
||||
ssh://git@code.cymais.cloud:2201/kevinveenbirkenbach/pkgmgr.git
|
||||
ssh://git@code.infinito.nexus:2201/kevinveenbirkenbach/pkgmgr.git
|
||||
https://pypi.org/project/kpmx/
|
||||
|
||||
6
Makefile
6
Makefile
@@ -10,6 +10,10 @@ DISTROS ?= arch debian ubuntu fedora centos
|
||||
PKGMGR_DISTRO ?= arch
|
||||
export PKGMGR_DISTRO
|
||||
|
||||
# Nix Config Variable (To avoid rate limit)
|
||||
NIX_CONFIG ?=
|
||||
export NIX_CONFIG
|
||||
|
||||
# ------------------------------------------------------------
|
||||
# Base images
|
||||
# (kept for documentation/reference; actual build logic is in scripts/build)
|
||||
@@ -44,7 +48,7 @@ install:
|
||||
# ------------------------------------------------------------
|
||||
|
||||
# Default: keep current auto-detection behavior
|
||||
setup: setup-nix setup-venv
|
||||
setup: setup-venv
|
||||
|
||||
# Explicit: developer setup (Python venv + shell RC + install)
|
||||
setup-venv: setup-nix
|
||||
|
||||
27
flake.lock
generated
Normal file
27
flake.lock
generated
Normal file
@@ -0,0 +1,27 @@
|
||||
{
|
||||
"nodes": {
|
||||
"nixpkgs": {
|
||||
"locked": {
|
||||
"lastModified": 1771714954,
|
||||
"narHash": "sha256-nhZJPnBavtu40/L2aqpljrfUNb2rxmWTmSjK2c9UKds=",
|
||||
"owner": "NixOS",
|
||||
"repo": "nixpkgs",
|
||||
"rev": "afbbf774e2087c3d734266c22f96fca2e78d3620",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "NixOS",
|
||||
"ref": "nixos-25.11",
|
||||
"repo": "nixpkgs",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"root": {
|
||||
"inputs": {
|
||||
"nixpkgs": "nixpkgs"
|
||||
}
|
||||
}
|
||||
},
|
||||
"root": "root",
|
||||
"version": 7
|
||||
}
|
||||
13
flake.nix
13
flake.nix
@@ -6,7 +6,7 @@
|
||||
};
|
||||
|
||||
inputs = {
|
||||
nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable";
|
||||
nixpkgs.url = "github:NixOS/nixpkgs/nixos-25.11";
|
||||
};
|
||||
|
||||
outputs = { self, nixpkgs }:
|
||||
@@ -32,7 +32,7 @@
|
||||
rec {
|
||||
pkgmgr = pyPkgs.buildPythonApplication {
|
||||
pname = "package-manager";
|
||||
version = "1.5.0";
|
||||
version = "1.13.3";
|
||||
|
||||
# Use the git repo as source
|
||||
src = ./.;
|
||||
@@ -40,6 +40,10 @@
|
||||
# Build using pyproject.toml
|
||||
format = "pyproject";
|
||||
|
||||
# Clear any stale wheels carried in from the source tree so
|
||||
# pypaInstallPhase doesn't collide on bin/pkgmgr.
|
||||
preBuild = "rm -rf dist";
|
||||
|
||||
# Build backend requirements from [build-system]
|
||||
nativeBuildInputs = [
|
||||
pyPkgs.setuptools
|
||||
@@ -49,7 +53,10 @@
|
||||
# Runtime dependencies (matches [project.dependencies] in pyproject.toml)
|
||||
propagatedBuildInputs = [
|
||||
pyPkgs.pyyaml
|
||||
pyPkgs.jinja2
|
||||
pyPkgs.pip
|
||||
pkgs.git
|
||||
pkgs.gnupg
|
||||
];
|
||||
|
||||
doCheck = false;
|
||||
@@ -78,6 +85,7 @@
|
||||
pythonWithDeps = python.withPackages (ps: [
|
||||
ps.pip
|
||||
ps.pyyaml
|
||||
ps.jinja2
|
||||
]);
|
||||
in
|
||||
{
|
||||
@@ -85,6 +93,7 @@
|
||||
buildInputs = [
|
||||
pythonWithDeps
|
||||
pkgs.git
|
||||
pkgs.gnupg
|
||||
ansiblePkg
|
||||
];
|
||||
|
||||
|
||||
@@ -1,15 +1,25 @@
|
||||
# Maintainer: Kevin Veen-Birkenbach <info@veen.world>
|
||||
|
||||
pkgname=package-manager
|
||||
pkgver=0.9.1
|
||||
pkgver=1.13.3
|
||||
pkgrel=1
|
||||
pkgdesc="Local-flake wrapper for Kevin's package-manager (Nix-based)."
|
||||
arch=('any')
|
||||
url="https://github.com/kevinveenbirkenbach/package-manager"
|
||||
license=('MIT')
|
||||
|
||||
# Nix is the only runtime dependency; Python is provided by the Nix closure.
|
||||
depends=('nix')
|
||||
# Nix is required at runtime to run pkgmgr via the flake.
|
||||
# On Arch x86_64 we can depend on the distro package.
|
||||
# On other arches (e.g. ARM) we only declare it as optional because the
|
||||
# repo package may be broken/out-of-sync; installation can be done via the official installer.
|
||||
depends=()
|
||||
optdepends=('nix: required to run pkgmgr via flake')
|
||||
|
||||
if [[ "${CARCH}" == "x86_64" ]]; then
|
||||
depends=('nix')
|
||||
optdepends=()
|
||||
fi
|
||||
|
||||
makedepends=('rsync')
|
||||
|
||||
install=${pkgname}.install
|
||||
@@ -47,7 +57,7 @@ package() {
|
||||
cd "$srcdir/$_srcdir_name"
|
||||
|
||||
# Install the wrapper into /usr/bin
|
||||
install -Dm0755 "scripts/pkgmgr-wrapper.sh" \
|
||||
install -Dm0755 "scripts/launcher.sh" \
|
||||
"$pkgdir/usr/bin/pkgmgr"
|
||||
|
||||
# Install Nix bootstrap (init + lib)
|
||||
|
||||
@@ -1,9 +1,9 @@
|
||||
post_install() {
|
||||
/usr/lib/package-manager/nix/init.sh || echo ">>> ERROR: /usr/lib/package-manager/nix/init.sh not found or not executable."
|
||||
/usr/lib/package-manager/nix/init.sh
|
||||
}
|
||||
|
||||
post_upgrade() {
|
||||
/usr/lib/package-manager/nix/init.sh || echo ">>> ERROR: /usr/lib/package-manager/nix/init.sh not found or not executable."
|
||||
/usr/lib/package-manager/nix/init.sh
|
||||
}
|
||||
|
||||
post_remove() {
|
||||
|
||||
@@ -1,3 +1,236 @@
|
||||
package-manager (1.13.3-1) unstable; urgency=medium
|
||||
|
||||
* CI pipelines now include automated security scanning (CodeQL, Docker lint), increasing detection of vulnerabilities and misconfigurations
|
||||
* Workflow permissions were tightened and fixed, ensuring secure and reliable execution of reusable workflows
|
||||
* Publishing and “stable” tagging are now restricted to the `main` branch, preventing accidental releases from other branches
|
||||
* Stale CI runs are automatically cancelled, reducing wasted resources and speeding up feedback cycles
|
||||
* Overall CI reliability and security posture improved, with fewer false positives and more consistent pipeline results
|
||||
|
||||
-- Kevin Veen-Birkenbach <kevin@veen.world> Thu, 26 Mar 2026 17:10:21 +0100
|
||||
|
||||
package-manager (1.13.2-1) unstable; urgency=medium
|
||||
|
||||
* Fail fast with a clear error when the Nix bootstrap or nix binary is unavailable instead of continuing with a broken startup path.
|
||||
|
||||
-- Kevin Veen-Birkenbach <kevin@veen.world> Thu, 26 Mar 2026 12:26:55 +0100
|
||||
|
||||
package-manager (1.13.1-1) unstable; urgency=medium
|
||||
|
||||
* Fixed misleading GPG verification failures by adding explicit git and gnupg runtime dependencies and surfacing signing-key lookup errors accurately.
|
||||
|
||||
-- Kevin Veen-Birkenbach <kevin@veen.world> Fri, 20 Mar 2026 02:57:25 +0100
|
||||
|
||||
package-manager (1.13.0-1) unstable; urgency=medium
|
||||
|
||||
* Set CentOS docker image to latest
|
||||
|
||||
-- Kevin Veen-Birkenbach <kevin@veen.world> Fri, 20 Mar 2026 01:29:38 +0100
|
||||
|
||||
package-manager (1.12.5-1) unstable; urgency=medium
|
||||
|
||||
* The stable-tag workflow now waits up to two hours for a successful main-branch CI run on the same commit before updating stable.
|
||||
|
||||
-- Kevin Veen-Birkenbach <kevin@veen.world> Tue, 24 Feb 2026 09:35:39 +0100
|
||||
|
||||
package-manager (1.12.4-1) unstable; urgency=medium
|
||||
|
||||
* The release pipeline now updates the stable tag only for v* tags after a successful CI run on main for the same commit, while avoiding duplicate test executions.
|
||||
|
||||
-- Kevin Veen-Birkenbach <kevin@veen.world> Tue, 24 Feb 2026 09:32:01 +0100
|
||||
|
||||
package-manager (1.12.3-1) unstable; urgency=medium
|
||||
|
||||
* Stabilized Nix-based builds by switching to nixos-25.11 and committing flake.lock, ensuring reproducible pkgmgr test/runtime environments (with pip) and avoiding transient sphinx/Python 3.11 breakage.
|
||||
|
||||
-- Kevin Veen-Birkenbach <kevin@veen.world> Tue, 24 Feb 2026 08:29:34 +0100
|
||||
|
||||
package-manager (1.12.2-1) unstable; urgency=medium
|
||||
|
||||
* Removed infinito-sphinx package
|
||||
|
||||
-- Kevin Veen-Birkenbach <kevin@veen.world> Tue, 24 Feb 2026 07:40:55 +0100
|
||||
|
||||
package-manager (1.12.1-1) unstable; urgency=medium
|
||||
|
||||
* pkgmgr now prefers distro-managed nix binaries on Arch before profile/PATH resolution, preventing libllhttp mismatch failures after pacman system upgrades.
|
||||
|
||||
-- Kevin Veen-Birkenbach <kevin@veen.world> Sat, 14 Feb 2026 23:26:17 +0100
|
||||
|
||||
package-manager (1.12.0-1) unstable; urgency=medium
|
||||
|
||||
* Adds explicit concurrency groups to the CI and mark-stable workflows to prevent overlapping runs on the same branch and make pipeline execution more predictable.
|
||||
|
||||
-- Kevin Veen-Birkenbach <kevin@veen.world> Sun, 08 Feb 2026 18:26:25 +0100
|
||||
|
||||
package-manager (1.11.2-1) unstable; urgency=medium
|
||||
|
||||
* Removes the v* tag trigger from the mark-stable workflow so it runs only on branch pushes and avoids duplicate executions during releases.
|
||||
|
||||
-- Kevin Veen-Birkenbach <kevin@veen.world> Sun, 08 Feb 2026 18:21:50 +0100
|
||||
|
||||
package-manager (1.11.1-1) unstable; urgency=medium
|
||||
|
||||
* Implements pushing the branch and the version tag together in a single command so the CI release workflow can reliably detect the version tag on HEAD.
|
||||
|
||||
-- Kevin Veen-Birkenbach <kevin@veen.world> Sun, 08 Feb 2026 18:18:09 +0100
|
||||
|
||||
package-manager (1.11.0-1) unstable; urgency=medium
|
||||
|
||||
* Adds a dedicated slim Docker image for pkgmgr and publishes slim variants for all supported distros.
|
||||
|
||||
-- Kevin Veen-Birkenbach <kevin@veen.world> Wed, 21 Jan 2026 01:18:31 +0100
|
||||
|
||||
package-manager (1.10.0-1) unstable; urgency=medium
|
||||
|
||||
* Automated release.
|
||||
|
||||
-- Kevin Veen-Birkenbach <kevin@veen.world> Tue, 20 Jan 2026 10:44:58 +0100
|
||||
|
||||
package-manager (1.9.5-1) unstable; urgency=medium
|
||||
|
||||
* Release patch: improve git pull error diagnostics
|
||||
|
||||
-- Kevin Veen-Birkenbach <kevin@veen.world> Fri, 16 Jan 2026 10:09:43 +0100
|
||||
|
||||
package-manager (1.9.4-1) unstable; urgency=medium
|
||||
|
||||
* fix(ci): replace sudo with su for user switching to avoid PAM failures in minimal container images
|
||||
|
||||
-- Kevin Veen-Birkenbach <kevin@veen.world> Tue, 13 Jan 2026 14:48:50 +0100
|
||||
|
||||
package-manager (1.9.3-1) unstable; urgency=medium
|
||||
|
||||
* Made the Nix dependency optional on non-x86_64 architectures to avoid broken Arch Linux ARM repository packages.
|
||||
|
||||
-- Kevin Veen-Birkenbach <kevin@veen.world> Wed, 07 Jan 2026 13:44:40 +0100
|
||||
|
||||
package-manager (1.9.2-1) unstable; urgency=medium
|
||||
|
||||
* Default configuration files are now packaged and loaded correctly when no user config exists, while fully preserving custom user configurations.
|
||||
|
||||
-- Kevin Veen-Birkenbach <kevin@veen.world> Sun, 21 Dec 2025 15:30:22 +0100
|
||||
|
||||
package-manager (1.9.1-1) unstable; urgency=medium
|
||||
|
||||
* Fixed installation issues and improved loading of default configuration files.
|
||||
|
||||
-- Kevin Veen-Birkenbach <kevin@veen.world> Sun, 21 Dec 2025 13:38:58 +0100
|
||||
|
||||
package-manager (1.9.0-1) unstable; urgency=medium
|
||||
|
||||
* * New ***mirror visibility*** command to set remote Git repositories to ***public*** or ***private***.
|
||||
* New ***--public*** flag for ***mirror provision*** to create repositories and immediately make them public.
|
||||
* All configured git mirrors are now provisioned.
|
||||
|
||||
-- Kevin Veen-Birkenbach <kevin@veen.world> Sat, 20 Dec 2025 14:37:58 +0100
|
||||
|
||||
package-manager (1.8.7-1) unstable; urgency=medium
|
||||
|
||||
* * **Release version updates now correctly modify ***pyproject.toml*** files that follow PEP 621**, ensuring the ***[project].version*** field is updated as expected.
|
||||
* **Invalid or incomplete ***pyproject.toml*** files are now handled gracefully** with clear error messages instead of abrupt process termination.
|
||||
* **RPM spec files remain compatible during releases**: existing macros such as ***%{?dist}*** are preserved and no longer accidentally modified.
|
||||
|
||||
-- Kevin Veen-Birkenbach <kevin@veen.world> Fri, 19 Dec 2025 14:15:47 +0100
|
||||
|
||||
package-manager (1.8.6-1) unstable; urgency=medium
|
||||
|
||||
* Prevent Rate Limits during GitHub Nix Setups
|
||||
|
||||
-- Kevin Veen-Birkenbach <kevin@veen.world> Wed, 17 Dec 2025 23:50:31 +0100
|
||||
|
||||
package-manager (1.8.5-1) unstable; urgency=medium
|
||||
|
||||
* * Clearer Git error handling, especially when a directory is not a Git repository.
|
||||
* More reliable repository verification with improved commit and GPG signature checks.
|
||||
* Better error messages and overall robustness when working with Git-based workflows.
|
||||
|
||||
-- Kevin Veen-Birkenbach <kevin@veen.world> Wed, 17 Dec 2025 22:15:48 +0100
|
||||
|
||||
package-manager (1.9.0-1) unstable; urgency=medium
|
||||
|
||||
* Automated release.
|
||||
|
||||
-- Kevin Veen-Birkenbach <kevin@veen.world> Wed, 17 Dec 2025 22:10:31 +0100
|
||||
|
||||
package-manager (1.8.4-1) unstable; urgency=medium
|
||||
|
||||
* * Made pkgmgr’s base-layer role explicit by standardizing the Docker/CI mount path to *`/opt/src/pkgmgr`*.
|
||||
|
||||
-- Kevin Veen-Birkenbach <kevin@veen.world> Wed, 17 Dec 2025 11:20:16 +0100
|
||||
|
||||
package-manager (1.8.3-1) unstable; urgency=medium
|
||||
|
||||
* MIRRORS now supports plain URL entries, ensuring metadata-only sources like PyPI are recorded without ever being added to the Git configuration.
|
||||
|
||||
-- Kevin Veen-Birkenbach <kevin@veen.world> Tue, 16 Dec 2025 19:49:51 +0100
|
||||
|
||||
package-manager (1.8.2-1) unstable; urgency=medium
|
||||
|
||||
* * ***pkgmgr tools code*** is more robust and predictable: it now fails early with clear errors if VS Code is not installed or a repository is not yet identified.
|
||||
|
||||
-- Kevin Veen-Birkenbach <kevin@veen.world> Tue, 16 Dec 2025 19:22:41 +0100
|
||||
|
||||
package-manager (1.8.1-1) unstable; urgency=medium
|
||||
|
||||
* * Improved stability and consistency of all Git operations (clone, pull, push, release, branch handling) with clearer error messages and predictable preview behavior.
|
||||
* Mirrors are now handled cleanly: only valid Git remotes are used for Git operations, while non-Git URLs (e.g. PyPI) are excluded, preventing broken or confusing repository configs.
|
||||
* GitHub authentication is more robust: tokens are automatically resolved via the GitHub CLI (`gh`), invalid stored tokens are replaced, and interactive prompts occur only when necessary.
|
||||
* Repository creation and release workflows are more reliable, producing cleaner Git configurations and more predictable version handling.
|
||||
|
||||
-- Kevin Veen-Birkenbach <kevin@veen.world> Tue, 16 Dec 2025 18:06:35 +0100
|
||||
|
||||
package-manager (1.8.0-1) unstable; urgency=medium
|
||||
|
||||
* *** New Features: ***
|
||||
- **Silent Updates**: You can now use the `--silent` flag during installs and updates to suppress error messages for individual repositories and get a single summary at the end. This ensures the process continues even if some repositories fail, while still preserving interactive checks when not in silent mode.
|
||||
- **Repository Scaffolding**: The process for creating new repositories has been improved. You can now use templates to scaffold repositories with a preview and automatic mirror setup.
|
||||
|
||||
*** Bug Fixes: ***
|
||||
- **Pip Installation**: Pip is now installed automatically on all supported systems. This includes `python-pip` for Arch and `python3-pip` for CentOS, Debian, Fedora, and Ubuntu, ensuring that pip is available for Python package installations.
|
||||
- **Pacman Keyring**: Fixed an issue on Arch Linux where package installation would fail due to missing keys. The pacman keyring is now properly initialized before installing packages.
|
||||
|
||||
-- Kevin Veen-Birkenbach <kevin@veen.world> Mon, 15 Dec 2025 13:37:42 +0100
|
||||
|
||||
package-manager (1.7.2-1) unstable; urgency=medium
|
||||
|
||||
* * Git mirrors are now resolved consistently (origin → MIRRORS file → config → default).
|
||||
* The `origin` remote is always enforced to use the primary URL for both fetch and push.
|
||||
* Additional mirrors are added as extra push targets without duplication.
|
||||
* Local and remote mirror setup behaves more predictably and consistently.
|
||||
* Improved test coverage ensures stable origin and push URL handling.
|
||||
|
||||
-- Kevin Veen-Birkenbach <kevin@veen.world> Mon, 15 Dec 2025 00:53:26 +0100
|
||||
|
||||
package-manager (1.7.1-1) unstable; urgency=medium
|
||||
|
||||
* Patched package-manager to kpmx to publish on pypi
|
||||
|
||||
-- Kevin Veen-Birkenbach <kevin@veen.world> Sun, 14 Dec 2025 21:19:11 +0100
|
||||
|
||||
package-manager (1.7.0-1) unstable; urgency=medium
|
||||
|
||||
* * New *pkgmgr publish* command to publish repository artifacts to PyPI based on the *MIRRORS* file.
|
||||
* Automatically selects the current repository when no explicit selection is given.
|
||||
* Publishes only when a semantic version tag is present on *HEAD*; otherwise skips with a clear info message.
|
||||
* Supports non-interactive mode for CI environments via *--non-interactive*.
|
||||
|
||||
-- Kevin Veen-Birkenbach <kevin@veen.world> Sun, 14 Dec 2025 21:10:06 +0100
|
||||
|
||||
package-manager (1.6.4-1) unstable; urgency=medium
|
||||
|
||||
* * Improved reliability of Nix installs and updates, including automatic resolution of profile conflicts and better handling of GitHub 403 rate limits.
|
||||
* More stable launcher behavior in packaged and virtual-env setups.
|
||||
* Enhanced mirror and remote handling: repository owner/name are derived from URLs, with smoother provisioning and clearer credential handling.
|
||||
* More reliable releases and artifacts due to safer CI behavior when no version tag is present.
|
||||
|
||||
-- Kevin Veen-Birkenbach <kevin@veen.world> Sun, 14 Dec 2025 19:33:07 +0100
|
||||
|
||||
package-manager (1.6.3-1) unstable; urgency=medium
|
||||
|
||||
* ***Fixed:*** Corrected repository path resolution so release and version logic consistently use the canonical packaging/* layout, preventing changelog and packaging files from being read or updated from incorrect locations.
|
||||
|
||||
-- Kevin Veen-Birkenbach <kevin@veen.world> Sun, 14 Dec 2025 13:39:52 +0100
|
||||
|
||||
package-manager (0.9.1-1) unstable; urgency=medium
|
||||
|
||||
* * Refactored installer: new `venv-create.sh`, cleaner root/user setup flow, updated README with architecture map.
|
||||
|
||||
@@ -3,7 +3,7 @@ set -e
|
||||
|
||||
case "$1" in
|
||||
configure)
|
||||
/usr/lib/package-manager/nix/init.sh || echo ">>> ERROR: /usr/lib/package-manager/nix/init.sh not found or not executable."
|
||||
/usr/lib/package-manager/nix/init.sh
|
||||
;;
|
||||
esac
|
||||
|
||||
|
||||
@@ -28,7 +28,7 @@ override_dh_auto_install:
|
||||
install -d debian/package-manager/usr/lib/package-manager
|
||||
|
||||
# Install wrapper
|
||||
install -m0755 scripts/pkgmgr-wrapper.sh \
|
||||
install -m0755 scripts/launcher.sh \
|
||||
debian/package-manager/usr/bin/pkgmgr
|
||||
|
||||
# Install Nix bootstrap (init + lib)
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
Name: package-manager
|
||||
Version: 0.9.1
|
||||
Version: 1.13.3
|
||||
Release: 1%{?dist}
|
||||
Summary: Wrapper that runs Kevin's package-manager via Nix flake
|
||||
|
||||
@@ -42,7 +42,7 @@ install -d %{buildroot}/usr/lib/package-manager
|
||||
cp -a . %{buildroot}/usr/lib/package-manager/
|
||||
|
||||
# Wrapper
|
||||
install -m0755 scripts/pkgmgr-wrapper.sh %{buildroot}%{_bindir}/pkgmgr
|
||||
install -m0755 scripts/launcher.sh %{buildroot}%{_bindir}/pkgmgr
|
||||
|
||||
# Nix bootstrap (init + lib)
|
||||
install -d %{buildroot}/usr/lib/package-manager/nix
|
||||
@@ -62,7 +62,7 @@ rm -rf \
|
||||
%{buildroot}/usr/lib/package-manager/.gitkeep || true
|
||||
|
||||
%post
|
||||
/usr/lib/package-manager/nix/init.sh || echo ">>> ERROR: /usr/lib/package-manager/nix/init.sh not found or not executable."
|
||||
/usr/lib/package-manager/nix/init.sh
|
||||
|
||||
%postun
|
||||
echo ">>> package-manager removed. Nix itself was not removed."
|
||||
@@ -74,6 +74,137 @@ echo ">>> package-manager removed. Nix itself was not removed."
|
||||
/usr/lib/package-manager/
|
||||
|
||||
%changelog
|
||||
* Thu Mar 26 2026 Kevin Veen-Birkenbach <kevin@veen.world> - 1.13.3-1
|
||||
- CI pipelines now include automated security scanning (CodeQL, Docker lint), increasing detection of vulnerabilities and misconfigurations
|
||||
* Workflow permissions were tightened and fixed, ensuring secure and reliable execution of reusable workflows
|
||||
* Publishing and “stable” tagging are now restricted to the `main` branch, preventing accidental releases from other branches
|
||||
* Stale CI runs are automatically cancelled, reducing wasted resources and speeding up feedback cycles
|
||||
* Overall CI reliability and security posture improved, with fewer false positives and more consistent pipeline results
|
||||
|
||||
* Thu Mar 26 2026 Kevin Veen-Birkenbach <kevin@veen.world> - 1.13.2-1
|
||||
- Fail fast with a clear error when the Nix bootstrap or nix binary is unavailable instead of continuing with a broken startup path.
|
||||
|
||||
* Fri Mar 20 2026 Kevin Veen-Birkenbach <kevin@veen.world> - 1.13.1-1
|
||||
- Fixed misleading GPG verification failures by adding explicit git and gnupg runtime dependencies and surfacing signing-key lookup errors accurately.
|
||||
|
||||
* Fri Mar 20 2026 Kevin Veen-Birkenbach <kevin@veen.world> - 1.13.0-1
|
||||
- Set CentOS docker image to latest
|
||||
|
||||
* Tue Feb 24 2026 Kevin Veen-Birkenbach <kevin@veen.world> - 1.12.5-1
|
||||
- The stable-tag workflow now waits up to two hours for a successful main-branch CI run on the same commit before updating stable.
|
||||
|
||||
* Tue Feb 24 2026 Kevin Veen-Birkenbach <kevin@veen.world> - 1.12.4-1
|
||||
- The release pipeline now updates the stable tag only for v* tags after a successful CI run on main for the same commit, while avoiding duplicate test executions.
|
||||
|
||||
* Tue Feb 24 2026 Kevin Veen-Birkenbach <kevin@veen.world> - 1.12.3-1
|
||||
- Stabilized Nix-based builds by switching to nixos-25.11 and committing flake.lock, ensuring reproducible pkgmgr test/runtime environments (with pip) and avoiding transient sphinx/Python 3.11 breakage.
|
||||
|
||||
* Tue Feb 24 2026 Kevin Veen-Birkenbach <kevin@veen.world> - 1.12.2-1
|
||||
- Removed infinito-sphinx package
|
||||
|
||||
* Sat Feb 14 2026 Kevin Veen-Birkenbach <kevin@veen.world> - 1.12.1-1
|
||||
- pkgmgr now prefers distro-managed nix binaries on Arch before profile/PATH resolution, preventing libllhttp mismatch failures after pacman system upgrades.
|
||||
|
||||
* Sun Feb 08 2026 Kevin Veen-Birkenbach <kevin@veen.world> - 1.12.0-1
|
||||
- Adds explicit concurrency groups to the CI and mark-stable workflows to prevent overlapping runs on the same branch and make pipeline execution more predictable.
|
||||
|
||||
* Sun Feb 08 2026 Kevin Veen-Birkenbach <kevin@veen.world> - 1.11.2-1
|
||||
- Removes the v* tag trigger from the mark-stable workflow so it runs only on branch pushes and avoids duplicate executions during releases.
|
||||
|
||||
* Sun Feb 08 2026 Kevin Veen-Birkenbach <kevin@veen.world> - 1.11.1-1
|
||||
- Implements pushing the branch and the version tag together in a single command so the CI release workflow can reliably detect the version tag on HEAD.
|
||||
|
||||
* Wed Jan 21 2026 Kevin Veen-Birkenbach <kevin@veen.world> - 1.11.0-1
|
||||
- Adds a dedicated slim Docker image for pkgmgr and publishes slim variants for all supported distros.
|
||||
|
||||
* Tue Jan 20 2026 Kevin Veen-Birkenbach <kevin@veen.world> - 1.10.0-1
|
||||
- Automated release.
|
||||
|
||||
* Fri Jan 16 2026 Kevin Veen-Birkenbach <kevin@veen.world> - 1.9.5-1
|
||||
- Release patch: improve git pull error diagnostics
|
||||
|
||||
* Tue Jan 13 2026 Kevin Veen-Birkenbach <kevin@veen.world> - 1.9.4-1
|
||||
- fix(ci): replace sudo with su for user switching to avoid PAM failures in minimal container images
|
||||
|
||||
* Wed Jan 07 2026 Kevin Veen-Birkenbach <kevin@veen.world> - 1.9.3-1
|
||||
- Made the Nix dependency optional on non-x86_64 architectures to avoid broken Arch Linux ARM repository packages.
|
||||
|
||||
* Sun Dec 21 2025 Kevin Veen-Birkenbach <kevin@veen.world> - 1.9.2-1
|
||||
- Default configuration files are now packaged and loaded correctly when no user config exists, while fully preserving custom user configurations.
|
||||
|
||||
* Sun Dec 21 2025 Kevin Veen-Birkenbach <kevin@veen.world> - 1.9.1-1
|
||||
- Fixed installation issues and improved loading of default configuration files.
|
||||
|
||||
* Sat Dec 20 2025 Kevin Veen-Birkenbach <kevin@veen.world> - 1.9.0-1
|
||||
- * New ***mirror visibility*** command to set remote Git repositories to ***public*** or ***private***.
|
||||
* New ***--public*** flag for ***mirror provision*** to create repositories and immediately make them public.
|
||||
* All configured git mirrors are now provisioned.
|
||||
|
||||
* Fri Dec 19 2025 Kevin Veen-Birkenbach <kevin@veen.world> - 1.8.7-1
|
||||
- * **Release version updates now correctly modify ***pyproject.toml*** files that follow PEP 621**, ensuring the ***[project].version*** field is updated as expected.
|
||||
* **Invalid or incomplete ***pyproject.toml*** files are now handled gracefully** with clear error messages instead of abrupt process termination.
|
||||
* **RPM spec files remain compatible during releases**: existing macros such as ***%{?dist}*** are preserved and no longer accidentally modified.
|
||||
|
||||
* Wed Dec 17 2025 Kevin Veen-Birkenbach <kevin@veen.world> - 1.8.6-1
|
||||
- Prevent Rate Limits during GitHub Nix Setups
|
||||
|
||||
* Wed Dec 17 2025 Kevin Veen-Birkenbach <kevin@veen.world> - 1.8.5-1
|
||||
- * Clearer Git error handling, especially when a directory is not a Git repository.
|
||||
* More reliable repository verification with improved commit and GPG signature checks.
|
||||
* Better error messages and overall robustness when working with Git-based workflows.
|
||||
|
||||
* Wed Dec 17 2025 Kevin Veen-Birkenbach <kevin@veen.world> - 1.9.0-1
|
||||
- Automated release.
|
||||
|
||||
* Wed Dec 17 2025 Kevin Veen-Birkenbach <kevin@veen.world> - 1.8.4-1
|
||||
- * Made pkgmgr’s base-layer role explicit by standardizing the Docker/CI mount path to *`/opt/src/pkgmgr`*.
|
||||
|
||||
* Tue Dec 16 2025 Kevin Veen-Birkenbach <kevin@veen.world> - 1.8.3-1
|
||||
- MIRRORS now supports plain URL entries, ensuring metadata-only sources like PyPI are recorded without ever being added to the Git configuration.
|
||||
|
||||
* Tue Dec 16 2025 Kevin Veen-Birkenbach <kevin@veen.world> - 1.8.2-1
|
||||
- * ***pkgmgr tools code*** is more robust and predictable: it now fails early with clear errors if VS Code is not installed or a repository is not yet identified.
|
||||
|
||||
* Tue Dec 16 2025 Kevin Veen-Birkenbach <kevin@veen.world> - 1.8.1-1
|
||||
- * Improved stability and consistency of all Git operations (clone, pull, push, release, branch handling) with clearer error messages and predictable preview behavior.
|
||||
* Mirrors are now handled cleanly: only valid Git remotes are used for Git operations, while non-Git URLs (e.g. PyPI) are excluded, preventing broken or confusing repository configs.
|
||||
* GitHub authentication is more robust: tokens are automatically resolved via the GitHub CLI (`gh`), invalid stored tokens are replaced, and interactive prompts occur only when necessary.
|
||||
* Repository creation and release workflows are more reliable, producing cleaner Git configurations and more predictable version handling.
|
||||
|
||||
* Mon Dec 15 2025 Kevin Veen-Birkenbach <kevin@veen.world> - 1.8.0-1
|
||||
- *** New Features: ***
|
||||
- **Silent Updates**: You can now use the `--silent` flag during installs and updates to suppress error messages for individual repositories and get a single summary at the end. This ensures the process continues even if some repositories fail, while still preserving interactive checks when not in silent mode.
|
||||
- **Repository Scaffolding**: The process for creating new repositories has been improved. You can now use templates to scaffold repositories with a preview and automatic mirror setup.
|
||||
|
||||
*** Bug Fixes: ***
|
||||
- **Pip Installation**: Pip is now installed automatically on all supported systems. This includes `python-pip` for Arch and `python3-pip` for CentOS, Debian, Fedora, and Ubuntu, ensuring that pip is available for Python package installations.
|
||||
- **Pacman Keyring**: Fixed an issue on Arch Linux where package installation would fail due to missing keys. The pacman keyring is now properly initialized before installing packages.
|
||||
|
||||
* Mon Dec 15 2025 Kevin Veen-Birkenbach <kevin@veen.world> - 1.7.2-1
|
||||
- * Git mirrors are now resolved consistently (origin → MIRRORS file → config → default).
|
||||
* The `origin` remote is always enforced to use the primary URL for both fetch and push.
|
||||
* Additional mirrors are added as extra push targets without duplication.
|
||||
* Local and remote mirror setup behaves more predictably and consistently.
|
||||
* Improved test coverage ensures stable origin and push URL handling.
|
||||
|
||||
* Sun Dec 14 2025 Kevin Veen-Birkenbach <kevin@veen.world> - 1.7.1-1
|
||||
- Patched package-manager to kpmx to publish on pypi
|
||||
|
||||
* Sun Dec 14 2025 Kevin Veen-Birkenbach <kevin@veen.world> - 1.7.0-1
|
||||
- * New *pkgmgr publish* command to publish repository artifacts to PyPI based on the *MIRRORS* file.
|
||||
* Automatically selects the current repository when no explicit selection is given.
|
||||
* Publishes only when a semantic version tag is present on *HEAD*; otherwise skips with a clear info message.
|
||||
* Supports non-interactive mode for CI environments via *--non-interactive*.
|
||||
|
||||
* Sun Dec 14 2025 Kevin Veen-Birkenbach <kevin@veen.world> - 1.6.4-1
|
||||
- * Improved reliability of Nix installs and updates, including automatic resolution of profile conflicts and better handling of GitHub 403 rate limits.
|
||||
* More stable launcher behavior in packaged and virtual-env setups.
|
||||
* Enhanced mirror and remote handling: repository owner/name are derived from URLs, with smoother provisioning and clearer credential handling.
|
||||
* More reliable releases and artifacts due to safer CI behavior when no version tag is present.
|
||||
|
||||
* Sun Dec 14 2025 Kevin Veen-Birkenbach <kevin@veen.world> - 1.6.3-1
|
||||
- ***Fixed:*** Corrected repository path resolution so release and version logic consistently use the canonical packaging/* layout, preventing changelog and packaging files from being read or updated from incorrect locations.
|
||||
|
||||
* Wed Dec 10 2025 Kevin Veen-Birkenbach <kevin@veen.world> - 0.9.1-1
|
||||
- * Refactored installer: new `venv-create.sh`, cleaner root/user setup flow, updated README with architecture map.
|
||||
* Split virgin tests into root/user workflows; stabilized Nix installer across distros; improved test scripts with dynamic distro selection and isolated Nix stores.
|
||||
|
||||
@@ -6,8 +6,8 @@ requires = [
|
||||
build-backend = "setuptools.build_meta"
|
||||
|
||||
[project]
|
||||
name = "package-manager"
|
||||
version = "1.5.0"
|
||||
name = "kpmx"
|
||||
version = "1.13.3"
|
||||
description = "Kevin's package-manager tool (pkgmgr)"
|
||||
readme = "README.md"
|
||||
requires-python = ">=3.9"
|
||||
@@ -19,16 +19,18 @@ authors = [
|
||||
|
||||
# Base runtime dependencies
|
||||
dependencies = [
|
||||
"PyYAML>=6.0"
|
||||
"PyYAML>=6.0",
|
||||
"tomli; python_version < \"3.11\"",
|
||||
"jinja2>=3.1"
|
||||
]
|
||||
|
||||
[project.urls]
|
||||
Homepage = "https://github.com/kevinveenbirkenbach/package-manager"
|
||||
Homepage = "https://s.veen.world/pkgmgr"
|
||||
Source = "https://github.com/kevinveenbirkenbach/package-manager"
|
||||
|
||||
[project.optional-dependencies]
|
||||
keyring = ["keyring>=24.0.0"]
|
||||
dev = [
|
||||
"pytest",
|
||||
"mypy"
|
||||
]
|
||||
|
||||
@@ -41,11 +43,12 @@ pkgmgr = "pkgmgr.cli:main"
|
||||
# -----------------------------
|
||||
# Source layout: all packages live under "src/"
|
||||
[tool.setuptools]
|
||||
package-dir = { "" = "src", "config" = "config" }
|
||||
package-dir = { "" = "src" }
|
||||
include-package-data = true
|
||||
|
||||
[tool.setuptools.packages.find]
|
||||
where = ["src", "."]
|
||||
include = ["pkgmgr*", "config*"]
|
||||
where = ["src"]
|
||||
include = ["pkgmgr*"]
|
||||
|
||||
[tool.setuptools.package-data]
|
||||
"config" = ["defaults.yaml"]
|
||||
"pkgmgr.config" = ["*.yml", "*.yaml"]
|
||||
|
||||
@@ -5,7 +5,7 @@ set -euo pipefail
|
||||
: "${BASE_IMAGE_DEBIAN:=debian:stable-slim}"
|
||||
: "${BASE_IMAGE_UBUNTU:=ubuntu:latest}"
|
||||
: "${BASE_IMAGE_FEDORA:=fedora:latest}"
|
||||
: "${BASE_IMAGE_CENTOS:=quay.io/centos/centos:stream9}"
|
||||
: "${BASE_IMAGE_CENTOS:=quay.io/centos/centos:latest}"
|
||||
|
||||
resolve_base_image() {
|
||||
local PKGMGR_DISTRO="$1"
|
||||
|
||||
@@ -33,7 +33,7 @@ Usage: PKGMGR_DISTRO=<distro> $0 [options]
|
||||
Build options:
|
||||
--missing Build only if the image does not already exist (local build only)
|
||||
--no-cache Build with --no-cache
|
||||
--target <name> Build a specific Dockerfile target (e.g. virgin)
|
||||
--target <name> Build a specific Dockerfile target (e.g. virgin, slim)
|
||||
--tag <image> Override the output image tag (default: ${default_tag})
|
||||
|
||||
Publish options:
|
||||
@@ -47,7 +47,7 @@ Publish options:
|
||||
|
||||
Notes:
|
||||
- --publish implies --push and requires --registry, --owner, and --version.
|
||||
- Local build (no --push) uses "docker build" and creates local images like "pkgmgr-arch" / "pkgmgr-arch-virgin".
|
||||
- Local build (no --push) uses "docker build" and creates local images like "pkgmgr-arch" / "pkgmgr-arch-virgin" / "pkgmgr-arch-slim".
|
||||
EOF
|
||||
}
|
||||
|
||||
@@ -57,7 +57,7 @@ while [[ $# -gt 0 ]]; do
|
||||
--missing) MISSING_ONLY=1; shift ;;
|
||||
--target)
|
||||
TARGET="${2:-}"
|
||||
[[ -n "${TARGET}" ]] || { echo "ERROR: --target requires a value (e.g. virgin)"; exit 2; }
|
||||
[[ -n "${TARGET}" ]] || { echo "ERROR: --target requires a value (e.g. virgin|slim)"; exit 2; }
|
||||
shift 2
|
||||
;;
|
||||
--tag)
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
#!/usr/bin/env bash
|
||||
set -euo pipefail
|
||||
|
||||
# Publish all distro images (full + virgin) to a registry via image.sh --publish
|
||||
# Publish all distro images (full + virgin + slim) to a registry via image.sh --publish
|
||||
#
|
||||
# Required env:
|
||||
# OWNER (e.g. GITHUB_REPOSITORY_OWNER)
|
||||
@@ -11,6 +11,9 @@ set -euo pipefail
|
||||
# REGISTRY (default: ghcr.io)
|
||||
# IS_STABLE (default: false)
|
||||
# DISTROS (default: "arch debian ubuntu fedora centos")
|
||||
#
|
||||
# Notes:
|
||||
# - This expects Dockerfile targets: virgin, full (default), slim
|
||||
|
||||
SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
|
||||
|
||||
@@ -33,7 +36,10 @@ for d in ${DISTROS}; do
|
||||
echo "[publish] PKGMGR_DISTRO=${d}"
|
||||
echo "============================================================"
|
||||
|
||||
# ----------------------------------------------------------
|
||||
# virgin
|
||||
# -> ghcr.io/<owner>/pkgmgr-<distro>-virgin:{latest,<version>,stable?}
|
||||
# ----------------------------------------------------------
|
||||
PKGMGR_DISTRO="${d}" bash "${SCRIPT_DIR}/image.sh" \
|
||||
--publish \
|
||||
--registry "${REGISTRY}" \
|
||||
@@ -42,13 +48,29 @@ for d in ${DISTROS}; do
|
||||
--stable "${IS_STABLE}" \
|
||||
--target virgin
|
||||
|
||||
# ----------------------------------------------------------
|
||||
# full (default target)
|
||||
# -> ghcr.io/<owner>/pkgmgr-<distro>:{latest,<version>,stable?}
|
||||
# ----------------------------------------------------------
|
||||
PKGMGR_DISTRO="${d}" bash "${SCRIPT_DIR}/image.sh" \
|
||||
--publish \
|
||||
--registry "${REGISTRY}" \
|
||||
--owner "${OWNER}" \
|
||||
--version "${VERSION}" \
|
||||
--stable "${IS_STABLE}"
|
||||
|
||||
# ----------------------------------------------------------
|
||||
# slim
|
||||
# -> ghcr.io/<owner>/pkgmgr-<distro>-slim:{latest,<version>,stable?}
|
||||
# + alias for default distro: ghcr.io/<owner>/pkgmgr-slim:{...}
|
||||
# ----------------------------------------------------------
|
||||
PKGMGR_DISTRO="${d}" bash "${SCRIPT_DIR}/image.sh" \
|
||||
--publish \
|
||||
--registry "${REGISTRY}" \
|
||||
--owner "${OWNER}" \
|
||||
--version "${VERSION}" \
|
||||
--stable "${IS_STABLE}" \
|
||||
--target slim
|
||||
done
|
||||
|
||||
echo
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
#!/usr/bin/env bash
|
||||
set -euo pipefail
|
||||
|
||||
echo "[docker] Starting package-manager container"
|
||||
echo "[docker-pkgmgr] Starting package-manager container"
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Log distribution info
|
||||
@@ -9,19 +9,19 @@ echo "[docker] Starting package-manager container"
|
||||
if [[ -f /etc/os-release ]]; then
|
||||
# shellcheck disable=SC1091
|
||||
. /etc/os-release
|
||||
echo "[docker] Detected distro: ${ID:-unknown} (like: ${ID_LIKE:-})"
|
||||
echo "[docker-pkgmgr] Detected distro: ${ID:-unknown} (like: ${ID_LIKE:-})"
|
||||
fi
|
||||
|
||||
# Always use /src (mounted from host) as working directory
|
||||
echo "[docker] Using /src as working directory"
|
||||
cd /src
|
||||
# Always use /opt/src/pkgmgr (mounted from host) as working directory
|
||||
echo "[docker-pkgmgr] Using /opt/src/pkgmgr as working directory"
|
||||
cd /opt/src/pkgmgr
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# DEV mode: rebuild package-manager from the mounted /src tree
|
||||
# DEV mode: rebuild package-manager from the mounted /opt/src/pkgmgr tree
|
||||
# ---------------------------------------------------------------------------
|
||||
if [[ "${REINSTALL_PKGMGR:-0}" == "1" ]]; then
|
||||
echo "[docker] DEV mode enabled (REINSTALL_PKGMGR=1)"
|
||||
echo "[docker] Rebuilding package-manager from /src via scripts/installation/package.sh..."
|
||||
echo "[docker-pkgmgr] DEV mode enabled (REINSTALL_PKGMGR=1)"
|
||||
echo "[docker-pkgmgr] Rebuilding package-manager from /opt/src/pkgmgr via scripts/installation/package.sh..."
|
||||
bash scripts/installation/package.sh || exit 1
|
||||
fi
|
||||
|
||||
@@ -29,9 +29,9 @@ fi
|
||||
# Hand off to pkgmgr or arbitrary command
|
||||
# ---------------------------------------------------------------------------
|
||||
if [[ $# -eq 0 ]]; then
|
||||
echo "[docker] No arguments provided. Showing pkgmgr help..."
|
||||
echo "[docker-pkgmgr] No arguments provided. Showing pkgmgr help..."
|
||||
exec pkgmgr --help
|
||||
else
|
||||
echo "[docker] Executing command: $*"
|
||||
echo "[docker-pkgmgr] Executing command: $*"
|
||||
exec "$@"
|
||||
fi
|
||||
|
||||
130
scripts/docker/slim.sh
Normal file
130
scripts/docker/slim.sh
Normal file
@@ -0,0 +1,130 @@
|
||||
#!/usr/bin/env bash
|
||||
set -euo pipefail
|
||||
|
||||
log() { echo "[cleanup] $*"; }
|
||||
warn() { echo "[cleanup][WARN] $*" >&2; }
|
||||
|
||||
MODE="${MODE:-safe}" # safe | aggressive
|
||||
# safe: caches/logs/tmp only
|
||||
# aggressive: safe + docs/man/info (optional)
|
||||
|
||||
ID="unknown"
|
||||
if [ -f /etc/os-release ]; then
|
||||
# shellcheck disable=SC1091
|
||||
. /etc/os-release
|
||||
ID="${ID:-unknown}"
|
||||
fi
|
||||
|
||||
log "Starting image cleanup"
|
||||
log "Mode: ${MODE}"
|
||||
log "Detected OS: ${ID}"
|
||||
|
||||
# ------------------------------------------------------------
|
||||
# Package manager caches (SAFE)
|
||||
# ------------------------------------------------------------
|
||||
case "${ID}" in
|
||||
alpine)
|
||||
log "Cleaning apk cache"
|
||||
if [ -d /var/cache/apk ]; then
|
||||
du -sh /var/cache/apk || true
|
||||
rm -rvf /var/cache/apk/* || true
|
||||
else
|
||||
log "apk cache directory not present (already clean)"
|
||||
fi
|
||||
;;
|
||||
arch)
|
||||
log "Cleaning pacman cache"
|
||||
du -sh /var/cache/pacman/pkg 2>/dev/null || true
|
||||
pacman -Scc --noconfirm || true
|
||||
rm -rvf /var/cache/pacman/pkg/* || true
|
||||
;;
|
||||
debian|ubuntu)
|
||||
log "Cleaning apt cache"
|
||||
du -sh /var/lib/apt/lists 2>/dev/null || true
|
||||
apt-get clean || true
|
||||
rm -rvf /var/lib/apt/lists/* || true
|
||||
;;
|
||||
fedora)
|
||||
log "Cleaning dnf cache"
|
||||
du -sh /var/cache/dnf 2>/dev/null || true
|
||||
dnf clean all || true
|
||||
rm -rvf /var/cache/dnf/* || true
|
||||
;;
|
||||
centos|rhel)
|
||||
log "Cleaning yum/dnf cache"
|
||||
du -sh /var/cache/yum /var/cache/dnf 2>/dev/null || true
|
||||
(command -v dnf >/dev/null 2>&1 && dnf clean all) || true
|
||||
(command -v yum >/dev/null 2>&1 && yum clean all) || true
|
||||
rm -rvf /var/cache/yum/* /var/cache/dnf/* || true
|
||||
;;
|
||||
*)
|
||||
warn "Unknown distro '${ID}' — skipping package manager cleanup"
|
||||
;;
|
||||
esac
|
||||
|
||||
# ------------------------------------------------------------
|
||||
# Python caches (SAFE)
|
||||
# ------------------------------------------------------------
|
||||
log "Cleaning pip cache"
|
||||
du -sh /root/.cache/pip 2>/dev/null || true
|
||||
rm -rvf /root/.cache/pip 2>/dev/null || true
|
||||
rm -rvf /home/*/.cache/pip 2>/dev/null || true
|
||||
|
||||
log "Cleaning __pycache__ directories"
|
||||
find /opt /usr /root /home -type d -name "__pycache__" -print -prune 2>/dev/null || true
|
||||
find /opt /usr /root /home -type d -name "__pycache__" -prune -exec rm -rvf {} + 2>/dev/null || true
|
||||
|
||||
# ------------------------------------------------------------
|
||||
# Logs (SAFE)
|
||||
# ------------------------------------------------------------
|
||||
log "Truncating log files (keeping paths intact)"
|
||||
if [ -d /var/log ]; then
|
||||
find /var/log -type f -name "*.log" -print 2>/dev/null || true
|
||||
find /var/log -type f -name "*.log" -exec sh -lc ': > "$1" 2>/dev/null || true' _ {} \; 2>/dev/null || true
|
||||
|
||||
find /var/log -type f -name "*.out" -print 2>/dev/null || true
|
||||
find /var/log -type f -name "*.out" -exec sh -lc ': > "$1" 2>/dev/null || true' _ {} \; 2>/dev/null || true
|
||||
fi
|
||||
|
||||
if command -v journalctl >/dev/null 2>&1; then
|
||||
log "Vacuuming journald logs"
|
||||
journalctl --disk-usage || true
|
||||
journalctl --vacuum-size=10M || true
|
||||
journalctl --vacuum-time=1s || true
|
||||
journalctl --disk-usage || true
|
||||
else
|
||||
log "journald not present (skipping)"
|
||||
fi
|
||||
|
||||
# ------------------------------------------------------------
|
||||
# Temporary files (SAFE)
|
||||
# ------------------------------------------------------------
|
||||
log "Cleaning temporary directories"
|
||||
if [ -d /tmp ]; then
|
||||
du -sh /tmp 2>/dev/null || true
|
||||
rm -rvf /tmp/* || true
|
||||
fi
|
||||
|
||||
if [ -d /var/tmp ]; then
|
||||
du -sh /var/tmp 2>/dev/null || true
|
||||
rm -rvf /var/tmp/* || true
|
||||
fi
|
||||
|
||||
# ------------------------------------------------------------
|
||||
# Generic caches (SAFE)
|
||||
# ------------------------------------------------------------
|
||||
log "Cleaning generic caches"
|
||||
du -sh /root/.cache 2>/dev/null || true
|
||||
rm -rvf /root/.cache/* 2>/dev/null || true
|
||||
rm -rvf /home/*/.cache/* 2>/dev/null || true
|
||||
|
||||
# ------------------------------------------------------------
|
||||
# Optional aggressive extras (still safe for runtime)
|
||||
# ------------------------------------------------------------
|
||||
if [[ "${MODE}" == "aggressive" ]]; then
|
||||
log "Aggressive mode enabled: removing docs/man/info"
|
||||
du -sh /usr/share/doc /usr/share/man /usr/share/info 2>/dev/null || true
|
||||
rm -rvf /usr/share/doc/* /usr/share/man/* /usr/share/info/* 2>/dev/null || true
|
||||
fi
|
||||
|
||||
log "Cleanup finished successfully"
|
||||
14
scripts/github/common/check-tagged-commit-on-main.sh
Normal file
14
scripts/github/common/check-tagged-commit-on-main.sh
Normal file
@@ -0,0 +1,14 @@
|
||||
#!/usr/bin/env bash
|
||||
set -euo pipefail
|
||||
|
||||
TARGET_SHA="${TARGET_SHA:-${GITHUB_SHA:?GITHUB_SHA must be set}}"
|
||||
|
||||
git fetch --no-tags origin main
|
||||
|
||||
if git merge-base --is-ancestor "${TARGET_SHA}" "origin/main"; then
|
||||
echo "is_on_main=true" >> "$GITHUB_OUTPUT"
|
||||
echo "Target commit ${TARGET_SHA} is contained in origin/main."
|
||||
else
|
||||
echo "is_on_main=false" >> "$GITHUB_OUTPUT"
|
||||
echo "Target commit ${TARGET_SHA} is not contained in origin/main. Skipping main-only action."
|
||||
fi
|
||||
43
scripts/github/mark-stable/mark-stable-if-highest-version.sh
Normal file
43
scripts/github/mark-stable/mark-stable-if-highest-version.sh
Normal file
@@ -0,0 +1,43 @@
|
||||
#!/usr/bin/env bash
|
||||
set -euo pipefail
|
||||
|
||||
git config user.name "github-actions[bot]"
|
||||
git config user.email "github-actions[bot]@users.noreply.github.com"
|
||||
|
||||
echo "Ref: $GITHUB_REF"
|
||||
echo "SHA: $GITHUB_SHA"
|
||||
|
||||
VERSION="${GITHUB_REF#refs/tags/}"
|
||||
echo "Current version tag: ${VERSION}"
|
||||
|
||||
echo "Collecting all version tags..."
|
||||
ALL_V_TAGS="$(git tag --list 'v*' || true)"
|
||||
|
||||
if [[ -z "${ALL_V_TAGS}" ]]; then
|
||||
echo "No version tags found. Skipping stable update."
|
||||
exit 0
|
||||
fi
|
||||
|
||||
echo "All version tags:"
|
||||
echo "${ALL_V_TAGS}"
|
||||
|
||||
LATEST_TAG="$(printf '%s\n' "${ALL_V_TAGS}" | sort -V | tail -n1)"
|
||||
|
||||
echo "Highest version tag: ${LATEST_TAG}"
|
||||
|
||||
if [[ "${VERSION}" != "${LATEST_TAG}" ]]; then
|
||||
echo "Current version ${VERSION} is NOT the highest version."
|
||||
echo "Stable tag will NOT be updated."
|
||||
exit 0
|
||||
fi
|
||||
|
||||
echo "Current version ${VERSION} IS the highest version."
|
||||
echo "Updating 'stable' tag..."
|
||||
|
||||
git tag -d stable 2>/dev/null || true
|
||||
git push origin :refs/tags/stable || true
|
||||
|
||||
git tag stable "$GITHUB_SHA"
|
||||
git push origin stable
|
||||
|
||||
echo "Stable tag updated to ${VERSION}."
|
||||
43
scripts/github/mark-stable/wait-for-main-ci-success.sh
Normal file
43
scripts/github/mark-stable/wait-for-main-ci-success.sh
Normal file
@@ -0,0 +1,43 @@
|
||||
#!/usr/bin/env bash
|
||||
set -euo pipefail
|
||||
|
||||
SHA="${GITHUB_SHA}"
|
||||
API_URL="https://api.github.com/repos/${GITHUB_REPOSITORY}/actions/workflows/ci.yml/runs?head_sha=${SHA}&event=push&per_page=20"
|
||||
WAIT_INTERVAL_SECONDS=20
|
||||
MAX_ATTEMPTS=990 # 5 hours 30 minutes max wait
|
||||
|
||||
STATUS=""
|
||||
CONCLUSION=""
|
||||
|
||||
echo "Waiting for CI on main for ${SHA} (up to 5 hours 30 minutes)..."
|
||||
for attempt in $(seq 1 "${MAX_ATTEMPTS}"); do
|
||||
RESPONSE="$(curl -fsSL \
|
||||
-H "Authorization: Bearer ${GH_TOKEN}" \
|
||||
-H "Accept: application/vnd.github+json" \
|
||||
"${API_URL}")"
|
||||
|
||||
STATUS="$(printf '%s' "${RESPONSE}" | jq -r '.workflow_runs[] | select(.head_branch=="main") | .status' | head -n1)"
|
||||
CONCLUSION="$(printf '%s' "${RESPONSE}" | jq -r '.workflow_runs[] | select(.head_branch=="main") | .conclusion' | head -n1)"
|
||||
|
||||
if [[ -n "${STATUS}" ]]; then
|
||||
echo "CI status=${STATUS} conclusion=${CONCLUSION:-none} (attempt ${attempt}/${MAX_ATTEMPTS})"
|
||||
else
|
||||
echo "No CI run for main found yet (attempt ${attempt}/${MAX_ATTEMPTS})"
|
||||
fi
|
||||
|
||||
if [[ "${STATUS}" == "completed" ]]; then
|
||||
if [[ "${CONCLUSION}" == "success" ]]; then
|
||||
echo "CI succeeded for ${SHA}."
|
||||
break
|
||||
fi
|
||||
echo "CI failed for ${SHA} (conclusion=${CONCLUSION})."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
sleep "${WAIT_INTERVAL_SECONDS}"
|
||||
done
|
||||
|
||||
if [[ "${STATUS}" != "completed" || "${CONCLUSION}" != "success" ]]; then
|
||||
echo "Timed out waiting for successful CI on main for ${SHA}."
|
||||
exit 1
|
||||
fi
|
||||
@@ -0,0 +1,8 @@
|
||||
#!/usr/bin/env bash
|
||||
set -euo pipefail
|
||||
|
||||
WORKFLOW_RUN_SHA="${WORKFLOW_RUN_SHA:?WORKFLOW_RUN_SHA must be set}"
|
||||
|
||||
git checkout -f "${WORKFLOW_RUN_SHA}"
|
||||
git fetch --tags --force
|
||||
git tag --list 'stable' 'v*' --sort=version:refname | tail -n 20
|
||||
@@ -0,0 +1,23 @@
|
||||
#!/usr/bin/env bash
|
||||
set -euo pipefail
|
||||
|
||||
SHA="$(git rev-parse HEAD)"
|
||||
|
||||
V_TAG="$(git tag --points-at "${SHA}" --list 'v*' | sort -V | tail -n1)"
|
||||
if [[ -z "${V_TAG}" ]]; then
|
||||
echo "No version tag found for ${SHA}. Skipping publish."
|
||||
echo "should_publish=false" >> "$GITHUB_OUTPUT"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
VERSION="${V_TAG#v}"
|
||||
|
||||
STABLE_SHA="$(git rev-parse -q --verify 'refs/tags/stable^{commit}' 2>/dev/null || true)"
|
||||
IS_STABLE=false
|
||||
[[ -n "${STABLE_SHA}" && "${STABLE_SHA}" == "${SHA}" ]] && IS_STABLE=true
|
||||
|
||||
{
|
||||
echo "should_publish=true"
|
||||
echo "version=${VERSION}"
|
||||
echo "is_stable=${IS_STABLE}"
|
||||
} >> "$GITHUB_OUTPUT"
|
||||
@@ -0,0 +1,8 @@
|
||||
#!/usr/bin/env bash
|
||||
set -euo pipefail
|
||||
|
||||
: "${OWNER:?OWNER must be set}"
|
||||
: "${VERSION:?VERSION must be set}"
|
||||
: "${IS_STABLE:?IS_STABLE must be set}"
|
||||
|
||||
bash scripts/build/publish.sh
|
||||
@@ -38,11 +38,7 @@ echo "[aur-builder-setup] Configuring sudoers for aur_builder..."
|
||||
${ROOT_CMD} bash -c "echo '%aur_builder ALL=(ALL) NOPASSWD: /usr/bin/pacman' > /etc/sudoers.d/aur_builder"
|
||||
${ROOT_CMD} chmod 0440 /etc/sudoers.d/aur_builder
|
||||
|
||||
if command -v sudo >/dev/null 2>&1; then
|
||||
RUN_AS_AUR=(sudo -u aur_builder bash -lc)
|
||||
else
|
||||
RUN_AS_AUR=(su - aur_builder -c)
|
||||
fi
|
||||
RUN_AS_AUR=(su - aur_builder -s /bin/bash -c)
|
||||
|
||||
echo "[aur-builder-setup] Ensuring yay is installed for aur_builder..."
|
||||
|
||||
|
||||
@@ -6,13 +6,22 @@ SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||
echo "[arch/dependencies] Installing Arch build dependencies..."
|
||||
|
||||
pacman -Syu --noconfirm
|
||||
|
||||
if ! pacman-key --list-sigs &>/dev/null; then
|
||||
echo "[arch/dependencies] Initializing pacman keyring..."
|
||||
pacman-key --init
|
||||
pacman-key --populate archlinux
|
||||
fi
|
||||
|
||||
pacman -S --noconfirm --needed \
|
||||
base-devel \
|
||||
git \
|
||||
gnupg \
|
||||
rsync \
|
||||
curl \
|
||||
ca-certificates \
|
||||
python \
|
||||
python-pip \
|
||||
xz
|
||||
|
||||
pacman -Scc --noconfirm
|
||||
|
||||
@@ -6,7 +6,7 @@ echo "[arch/package] Building Arch package (makepkg --nodeps) in an isolated bui
|
||||
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||
PROJECT_ROOT="$(cd "${SCRIPT_DIR}/../../.." && pwd)"
|
||||
|
||||
# We must not build inside /src (mounted repo). Build in /tmp to avoid permission issues.
|
||||
# We must not build inside /opt/src/pkgmgr (mounted repo). Build in /tmp to avoid permission issues.
|
||||
BUILD_ROOT="/tmp/package-manager-arch-build"
|
||||
PKG_SRC_DIR="${PROJECT_ROOT}/packaging/arch"
|
||||
PKG_BUILD_DIR="${BUILD_ROOT}/packaging/arch"
|
||||
|
||||
@@ -6,6 +6,7 @@ echo "[centos/dependencies] Installing CentOS build dependencies..."
|
||||
dnf -y update
|
||||
dnf -y install \
|
||||
git \
|
||||
gnupg2 \
|
||||
rsync \
|
||||
rpm-build \
|
||||
make \
|
||||
@@ -14,6 +15,7 @@ dnf -y install \
|
||||
curl-minimal \
|
||||
ca-certificates \
|
||||
python3 \
|
||||
python3-pip \
|
||||
sudo \
|
||||
xz
|
||||
|
||||
|
||||
@@ -9,12 +9,14 @@ DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
|
||||
debhelper \
|
||||
dpkg-dev \
|
||||
git \
|
||||
gnupg \
|
||||
rsync \
|
||||
bash \
|
||||
curl \
|
||||
ca-certificates \
|
||||
python3 \
|
||||
python3-venv \
|
||||
python3-pip \
|
||||
xz-utils
|
||||
|
||||
rm -rf /var/lib/apt/lists/*
|
||||
|
||||
@@ -6,6 +6,7 @@ echo "[fedora/dependencies] Installing Fedora build dependencies..."
|
||||
dnf -y update
|
||||
dnf -y install \
|
||||
git \
|
||||
gnupg2 \
|
||||
rsync \
|
||||
rpm-build \
|
||||
make \
|
||||
@@ -14,6 +15,7 @@ dnf -y install \
|
||||
curl \
|
||||
ca-certificates \
|
||||
python3 \
|
||||
python3-pip \
|
||||
xz
|
||||
|
||||
dnf clean all
|
||||
|
||||
@@ -9,6 +9,7 @@ DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
|
||||
debhelper \
|
||||
dpkg-dev \
|
||||
git \
|
||||
gnupg \
|
||||
tzdata \
|
||||
lsb-release \
|
||||
rsync \
|
||||
@@ -17,6 +18,7 @@ DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
|
||||
make \
|
||||
python3 \
|
||||
python3-venv \
|
||||
python3-pip \
|
||||
ca-certificates \
|
||||
xz-utils
|
||||
|
||||
|
||||
@@ -1,12 +1,17 @@
|
||||
#!/usr/bin/env bash
|
||||
set -euo pipefail
|
||||
|
||||
# Ensure NIX_CONFIG has our defaults if not already set
|
||||
if [[ -z "${NIX_CONFIG:-}" ]]; then
|
||||
export NIX_CONFIG="experimental-features = nix-command flakes"
|
||||
fi
|
||||
|
||||
FLAKE_DIR="/usr/lib/package-manager"
|
||||
NIX_LIB_DIR="${FLAKE_DIR}/nix/lib"
|
||||
RETRY_LIB="${NIX_LIB_DIR}/retry_403.sh"
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Hard requirement: retry helper must exist (fail if missing)
|
||||
# ---------------------------------------------------------------------------
|
||||
if [[ ! -f "${RETRY_LIB}" ]]; then
|
||||
echo "[launcher] ERROR: Required retry helper not found: ${RETRY_LIB}" >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Try to ensure that "nix" is on PATH (common locations + container user)
|
||||
@@ -32,17 +37,23 @@ fi
|
||||
# ---------------------------------------------------------------------------
|
||||
if ! command -v nix >/dev/null 2>&1; then
|
||||
if [[ -x "${FLAKE_DIR}/nix/init.sh" ]]; then
|
||||
"${FLAKE_DIR}/nix/init.sh" || true
|
||||
"${FLAKE_DIR}/nix/init.sh"
|
||||
fi
|
||||
fi
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Primary path: use Nix flake if available
|
||||
# ---------------------------------------------------------------------------
|
||||
if command -v nix >/dev/null 2>&1; then
|
||||
exec nix run "${FLAKE_DIR}#pkgmgr" -- "$@"
|
||||
if ! command -v nix >/dev/null 2>&1; then
|
||||
echo "[launcher] ERROR: 'nix' binary not found on PATH after init." >&2
|
||||
echo "[launcher] Nix is required to run pkgmgr (no Python fallback)." >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "[pkgmgr-wrapper] ERROR: 'nix' binary not found on PATH after init."
|
||||
echo "[pkgmgr-wrapper] Nix is required to run pkgmgr (no Python fallback)."
|
||||
exit 1
|
||||
# ---------------------------------------------------------------------------
|
||||
# Primary path: use Nix flake if available (with GitHub 403 retry)
|
||||
# ---------------------------------------------------------------------------
|
||||
if declare -F run_with_github_403_retry >/dev/null; then
|
||||
# shellcheck source=./scripts/nix/lib/retry_403.sh
|
||||
source "${RETRY_LIB}"
|
||||
exec run_with_github_403_retry nix run "${FLAKE_DIR}#pkgmgr" -- "$@"
|
||||
else
|
||||
exec nix run "${FLAKE_DIR}#pkgmgr" -- "$@"
|
||||
fi
|
||||
@@ -49,11 +49,7 @@ install_nix_with_retry() {
|
||||
if [[ -n "$run_as" ]]; then
|
||||
chown "$run_as:$run_as" "$installer" 2>/dev/null || true
|
||||
echo "[init-nix] Running installer as user '$run_as' ($mode_flag)..."
|
||||
if command -v sudo >/dev/null 2>&1; then
|
||||
sudo -u "$run_as" bash -lc "sh '$installer' $mode_flag"
|
||||
else
|
||||
su - "$run_as" -c "sh '$installer' $mode_flag"
|
||||
fi
|
||||
su - "$run_as" -s /bin/bash -c "bash -lc \"sh '$installer' $mode_flag\""
|
||||
else
|
||||
echo "[init-nix] Running installer as current user ($mode_flag)..."
|
||||
sh "$installer" "$mode_flag"
|
||||
|
||||
@@ -11,45 +11,79 @@ nixconf_file_path() {
|
||||
echo "/etc/nix/nix.conf"
|
||||
}
|
||||
|
||||
nixconf_ensure_experimental_features() {
|
||||
local nix_conf want
|
||||
nix_conf="$(nixconf_file_path)"
|
||||
want="experimental-features = nix-command flakes"
|
||||
# Ensure a given nix.conf key contains required tokens (merged, no duplicates)
|
||||
nixconf_ensure_features_key() {
|
||||
local nix_conf="$1"
|
||||
local key="$2"
|
||||
shift 2
|
||||
local required=("$@")
|
||||
|
||||
mkdir -p /etc/nix
|
||||
|
||||
# Create file if missing (with just the required tokens)
|
||||
if [[ ! -f "${nix_conf}" ]]; then
|
||||
local want="${key} = ${required[*]}"
|
||||
echo "[nix-conf] Creating ${nix_conf} with: ${want}"
|
||||
printf "%s\n" "${want}" >"${nix_conf}"
|
||||
return 0
|
||||
fi
|
||||
|
||||
if grep -qE '^\s*experimental-features\s*=' "${nix_conf}"; then
|
||||
if grep -qE '^\s*experimental-features\s*=.*\bnix-command\b' "${nix_conf}" \
|
||||
&& grep -qE '^\s*experimental-features\s*=.*\bflakes\b' "${nix_conf}"; then
|
||||
echo "[nix-conf] experimental-features already correct"
|
||||
# Key exists -> merge tokens
|
||||
if grep -qE "^\s*${key}\s*=" "${nix_conf}"; then
|
||||
local ok=1
|
||||
local t
|
||||
for t in "${required[@]}"; do
|
||||
if ! grep -qE "^\s*${key}\s*=.*\b${t}\b" "${nix_conf}"; then
|
||||
ok=0
|
||||
break
|
||||
fi
|
||||
done
|
||||
|
||||
if [[ "$ok" -eq 1 ]]; then
|
||||
echo "[nix-conf] ${key} already correct"
|
||||
return 0
|
||||
fi
|
||||
|
||||
echo "[nix-conf] Extending experimental-features in ${nix_conf}"
|
||||
echo "[nix-conf] Extending ${key} in ${nix_conf}"
|
||||
|
||||
local current
|
||||
current="$(grep -E '^\s*experimental-features\s*=' "${nix_conf}" | head -n1 | cut -d= -f2-)"
|
||||
current="$(grep -E "^\s*${key}\s*=" "${nix_conf}" | head -n1 | cut -d= -f2-)"
|
||||
current="$(echo "${current}" | xargs)" # trim
|
||||
|
||||
# Build a merged feature string without duplicates (simple token set)
|
||||
local merged="nix-command flakes"
|
||||
local merged=""
|
||||
local token
|
||||
|
||||
# Start with existing tokens
|
||||
for token in ${current}; do
|
||||
if [[ " ${merged} " != *" ${token} "* ]]; then
|
||||
merged="${merged} ${token}"
|
||||
fi
|
||||
done
|
||||
|
||||
sed -i "s|^\s*experimental-features\s*=.*|experimental-features = ${merged}|" "${nix_conf}"
|
||||
# Add required tokens
|
||||
for token in "${required[@]}"; do
|
||||
if [[ " ${merged} " != *" ${token} "* ]]; then
|
||||
merged="${merged} ${token}"
|
||||
fi
|
||||
done
|
||||
|
||||
merged="$(echo "${merged}" | xargs)" # trim
|
||||
|
||||
sed -i "s|^\s*${key}\s*=.*|${key} = ${merged}|" "${nix_conf}"
|
||||
return 0
|
||||
fi
|
||||
|
||||
# Key missing -> append
|
||||
local want="${key} = ${required[*]}"
|
||||
echo "[nix-conf] Appending to ${nix_conf}: ${want}"
|
||||
printf "\n%s\n" "${want}" >>"${nix_conf}"
|
||||
}
|
||||
|
||||
nixconf_ensure_experimental_features() {
|
||||
local nix_conf
|
||||
nix_conf="$(nixconf_file_path)"
|
||||
|
||||
# Ensure both keys to avoid prompts and cover older/alternate expectations
|
||||
nixconf_ensure_features_key "${nix_conf}" "experimental-features" "nix-command" "flakes"
|
||||
nixconf_ensure_features_key "${nix_conf}" "extra-experimental-features" "nix-command" "flakes"
|
||||
}
|
||||
|
||||
@@ -36,16 +36,17 @@ real_exe() {
|
||||
|
||||
# Resolve nix binary path robustly (works across distros + Arch /usr/sbin)
|
||||
resolve_nix_bin() {
|
||||
local nix_cmd=""
|
||||
nix_cmd="$(command -v nix 2>/dev/null || true)"
|
||||
[[ -n "$nix_cmd" ]] && real_exe "$nix_cmd" && return 0
|
||||
|
||||
# IMPORTANT: prefer system locations before /usr/local to avoid self-symlink traps
|
||||
# IMPORTANT: prefer distro-managed locations first.
|
||||
# This avoids pinning /usr/local/bin/nix to a stale user-profile nix binary.
|
||||
[[ -x /usr/sbin/nix ]] && { echo "/usr/sbin/nix"; return 0; } # Arch package can land here
|
||||
[[ -x /usr/bin/nix ]] && { echo "/usr/bin/nix"; return 0; }
|
||||
[[ -x /bin/nix ]] && { echo "/bin/nix"; return 0; }
|
||||
|
||||
# /usr/local last, and only if it resolves to a real executable
|
||||
local nix_cmd=""
|
||||
nix_cmd="$(command -v nix 2>/dev/null || true)"
|
||||
[[ -n "$nix_cmd" ]] && real_exe "$nix_cmd" && return 0
|
||||
|
||||
# /usr/local after system locations, and only if it resolves to a real executable
|
||||
[[ -e /usr/local/bin/nix ]] && real_exe "/usr/local/bin/nix" && return 0
|
||||
|
||||
[[ -x /nix/var/nix/profiles/default/bin/nix ]] && {
|
||||
|
||||
52
scripts/nix/lib/retry_403.sh
Executable file
52
scripts/nix/lib/retry_403.sh
Executable file
@@ -0,0 +1,52 @@
|
||||
#!/usr/bin/env bash
|
||||
set -euo pipefail
|
||||
|
||||
if [[ -n "${PKGMGR_NIX_RETRY_403_SH:-}" ]]; then
|
||||
return 0
|
||||
fi
|
||||
PKGMGR_NIX_RETRY_403_SH=1
|
||||
|
||||
# Retry only when we see the GitHub API rate limit 403 error during nix flake evaluation.
|
||||
# Retries 7 times with delays: 10, 30, 50, 80, 130, 210, 420 seconds.
|
||||
run_with_github_403_retry() {
|
||||
local -a delays=(10 30 50 80 130 210 420)
|
||||
local attempt=0
|
||||
local max_retries="${#delays[@]}"
|
||||
|
||||
while true; do
|
||||
local err tmp
|
||||
tmp="$(mktemp -t nix-err.XXXXXX)"
|
||||
err=0
|
||||
|
||||
# Run the command; capture stderr for inspection while preserving stdout.
|
||||
if "$@" 2>"$tmp"; then
|
||||
rm -f "$tmp"
|
||||
return 0
|
||||
else
|
||||
err=$?
|
||||
fi
|
||||
|
||||
# Only retry on the specific GitHub API rate limit 403 case.
|
||||
if grep -qE 'HTTP error 403' "$tmp" && grep -qiE 'API rate limit exceeded|api\.github\.com' "$tmp"; then
|
||||
if (( attempt >= max_retries )); then
|
||||
cat "$tmp" >&2
|
||||
rm -f "$tmp"
|
||||
return "$err"
|
||||
fi
|
||||
|
||||
local sleep_s="${delays[$attempt]}"
|
||||
attempt=$((attempt + 1))
|
||||
|
||||
echo "[nix-retry] GitHub API rate-limit (403). Retry ${attempt}/${max_retries} in ${sleep_s}s: $*" >&2
|
||||
cat "$tmp" >&2
|
||||
rm -f "$tmp"
|
||||
sleep "$sleep_s"
|
||||
continue
|
||||
fi
|
||||
|
||||
# Not our retry case -> fail fast with original stderr.
|
||||
cat "$tmp" >&2
|
||||
rm -f "$tmp"
|
||||
return "$err"
|
||||
done
|
||||
}
|
||||
@@ -6,12 +6,13 @@ echo ">>> Running E2E tests: $PKGMGR_DISTRO"
|
||||
echo "============================================================"
|
||||
|
||||
docker run --rm \
|
||||
-v "$(pwd):/src" \
|
||||
-v "$(pwd):/opt/src/pkgmgr" \
|
||||
-v "pkgmgr_nix_store_${PKGMGR_DISTRO}:/nix" \
|
||||
-v "pkgmgr_nix_cache_${PKGMGR_DISTRO}:/root/.cache/nix" \
|
||||
-e REINSTALL_PKGMGR=1 \
|
||||
-e TEST_PATTERN="${TEST_PATTERN}" \
|
||||
--workdir /src \
|
||||
-e NIX_CONFIG="${NIX_CONFIG}" \
|
||||
--workdir /opt/src/pkgmgr \
|
||||
"pkgmgr-${PKGMGR_DISTRO}" \
|
||||
bash -lc '
|
||||
set -euo pipefail
|
||||
@@ -40,21 +41,21 @@ docker run --rm \
|
||||
}
|
||||
|
||||
# Mark the mounted repository as safe to avoid Git ownership errors.
|
||||
# Newer Git (e.g. on Ubuntu) complains about the gitdir (/src/.git),
|
||||
# older versions about the worktree (/src). Nix turns "." into the
|
||||
# flake input "git+file:///src", which then uses Git under the hood.
|
||||
# Newer Git (e.g. on Ubuntu) complains about the gitdir (/opt/src/pkgmgr/.git),
|
||||
# older versions about the worktree (/opt/src/pkgmgr). Nix turns "." into the
|
||||
# flake input "git+file:///opt/src/pkgmgr", which then uses Git under the hood.
|
||||
if command -v git >/dev/null 2>&1; then
|
||||
# Worktree path
|
||||
git config --global --add safe.directory /src || true
|
||||
git config --global --add safe.directory /opt/src/pkgmgr || true
|
||||
# Gitdir path shown in the "dubious ownership" error
|
||||
git config --global --add safe.directory /src/.git || true
|
||||
git config --global --add safe.directory /opt/src/pkgmgr/.git || true
|
||||
# Ephemeral CI containers: allow all paths as a last resort
|
||||
git config --global --add safe.directory '*' || true
|
||||
git config --global --add safe.directory "*" || true
|
||||
fi
|
||||
|
||||
# Run the E2E tests inside the Nix development shell
|
||||
nix develop .#default --no-write-lock-file -c \
|
||||
python3 -m unittest discover \
|
||||
-s /src/tests/e2e \
|
||||
-s /opt/src/pkgmgr/tests/e2e \
|
||||
-p "$TEST_PATTERN"
|
||||
'
|
||||
|
||||
@@ -9,25 +9,26 @@ echo ">>> Image: ${IMAGE}"
|
||||
echo "============================================================"
|
||||
|
||||
docker run --rm \
|
||||
-v "$(pwd):/src" \
|
||||
-v "$(pwd):/opt/src/pkgmgr" \
|
||||
-v "pkgmgr_nix_store_${PKGMGR_DISTRO}:/nix" \
|
||||
-v "pkgmgr_nix_cache_${PKGMGR_DISTRO}:/root/.cache/nix" \
|
||||
--workdir /src \
|
||||
--workdir /opt/src/pkgmgr \
|
||||
-e REINSTALL_PKGMGR=1 \
|
||||
-e NIX_CONFIG="${NIX_CONFIG}" \
|
||||
"${IMAGE}" \
|
||||
bash -lc '
|
||||
set -euo pipefail
|
||||
|
||||
if command -v git >/dev/null 2>&1; then
|
||||
git config --global --add safe.directory /src || true
|
||||
git config --global --add safe.directory /src/.git || true
|
||||
git config --global --add safe.directory /opt/src/pkgmgr || true
|
||||
git config --global --add safe.directory /opt/src/pkgmgr/.git || true
|
||||
git config --global --add safe.directory "*" || true
|
||||
fi
|
||||
|
||||
echo ">>> preflight: nix must exist in image"
|
||||
if ! command -v nix >/dev/null 2>&1; then
|
||||
echo "NO_NIX"
|
||||
echo "ERROR: nix not found in image '\'''"${IMAGE}"''\'' (PKGMGR_DISTRO='"${PKGMGR_DISTRO}"')"
|
||||
echo "ERROR: nix not found in image '"${IMAGE}"' (PKGMGR_DISTRO='"${PKGMGR_DISTRO}"')"
|
||||
echo "HINT: Ensure Nix is installed during image build for this distro."
|
||||
exit 1
|
||||
fi
|
||||
@@ -35,14 +36,28 @@ docker run --rm \
|
||||
echo ">>> nix version"
|
||||
nix --version
|
||||
|
||||
# ------------------------------------------------------------
|
||||
# Retry helper for GitHub API rate-limit (HTTP 403)
|
||||
# ------------------------------------------------------------
|
||||
if [[ -f /opt/src/pkgmgr/scripts/nix/lib/retry_403.sh ]]; then
|
||||
# shellcheck source=./scripts/nix/lib/retry_403.sh
|
||||
source /opt/src/pkgmgr/scripts/nix/lib/retry_403.sh
|
||||
elif [[ -f ./scripts/nix/lib/retry_403.sh ]]; then
|
||||
# shellcheck source=./scripts/nix/lib/retry_403.sh
|
||||
source ./scripts/nix/lib/retry_403.sh
|
||||
else
|
||||
echo "ERROR: retry helper not found: scripts/nix/lib/retry_403.sh"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo ">>> nix flake show"
|
||||
nix flake show . --no-write-lock-file >/dev/null
|
||||
run_with_github_403_retry nix flake show . --no-write-lock-file >/dev/null
|
||||
|
||||
echo ">>> nix build .#default"
|
||||
nix build .#default --no-link --no-write-lock-file
|
||||
run_with_github_403_retry nix build .#default --no-link --no-write-lock-file
|
||||
|
||||
echo ">>> nix run .#pkgmgr -- --help"
|
||||
nix run .#pkgmgr -- --help --no-write-lock-file
|
||||
run_with_github_403_retry nix run .#pkgmgr -- --help --no-write-lock-file
|
||||
|
||||
echo ">>> OK: Nix flake-only test succeeded."
|
||||
'
|
||||
|
||||
@@ -1,32 +1,50 @@
|
||||
#!/usr/bin/env bash
|
||||
set -euo pipefail
|
||||
|
||||
IMAGE="pkgmgr-$PKGMGR_DISTRO"
|
||||
IMAGE="pkgmgr-${PKGMGR_DISTRO}"
|
||||
|
||||
echo
|
||||
echo "------------------------------------------------------------"
|
||||
echo ">>> Testing VENV: $IMAGE"
|
||||
echo ">>> Testing VENV: ${IMAGE}"
|
||||
echo "------------------------------------------------------------"
|
||||
|
||||
echo "[test-env-virtual] Inspect image metadata:"
|
||||
docker image inspect "$IMAGE" | sed -n '1,40p'
|
||||
|
||||
echo "[test-env-virtual] Running: docker run --rm --entrypoint pkgmgr $IMAGE --help"
|
||||
docker image inspect "${IMAGE}" | sed -n '1,40p'
|
||||
echo
|
||||
|
||||
# Run the command and capture the output
|
||||
# ------------------------------------------------------------
|
||||
# Run VENV-based pkgmgr test inside container
|
||||
# ------------------------------------------------------------
|
||||
if OUTPUT=$(docker run --rm \
|
||||
-e REINSTALL_PKGMGR=1 \
|
||||
-v "pkgmgr_nix_store_${PKGMGR_DISTRO}:/nix" \
|
||||
-v "$(pwd):/src" \
|
||||
-v "pkgmgr_nix_cache_${PKGMGR_DISTRO}:/root/.cache/nix" \
|
||||
"$IMAGE" 2>&1); then
|
||||
-e REINSTALL_PKGMGR=1 \
|
||||
-v "$(pwd):/opt/src/pkgmgr" \
|
||||
-w /opt/src/pkgmgr \
|
||||
-e NIX_CONFIG="${NIX_CONFIG}" \
|
||||
"${IMAGE}" \
|
||||
bash -lc '
|
||||
set -euo pipefail
|
||||
|
||||
echo "[test-env-virtual] Installing pkgmgr (distro package)..."
|
||||
make install
|
||||
|
||||
echo "[test-env-virtual] Setting up Python venv..."
|
||||
make setup-venv
|
||||
|
||||
echo "[test-env-virtual] Activating venv..."
|
||||
. "$HOME/.venvs/pkgmgr/bin/activate"
|
||||
|
||||
echo "[test-env-virtual] Using pkgmgr from:"
|
||||
command -v pkgmgr
|
||||
pkgmgr --help
|
||||
' 2>&1); then
|
||||
|
||||
echo "$OUTPUT"
|
||||
echo
|
||||
echo "[test-env-virtual] SUCCESS: $IMAGE responded to 'pkgmgr --help'"
|
||||
echo "[test-env-virtual] SUCCESS: venv-based pkgmgr works in ${IMAGE}"
|
||||
|
||||
else
|
||||
echo "$OUTPUT"
|
||||
echo
|
||||
echo "[test-env-virtual] ERROR: $IMAGE failed to run 'pkgmgr --help'"
|
||||
echo "[test-env-virtual] ERROR: venv-based pkgmgr failed in ${IMAGE}"
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
@@ -6,19 +6,20 @@ echo ">>> Running INTEGRATION tests in ${PKGMGR_DISTRO} container"
|
||||
echo "============================================================"
|
||||
|
||||
docker run --rm \
|
||||
-v "$(pwd):/src" \
|
||||
-v "$(pwd):/opt/src/pkgmgr" \
|
||||
-v "pkgmgr_nix_store_${PKGMGR_DISTRO}:/nix" \
|
||||
-v "pkgmgr_nix_cache_${PKGMGR_DISTRO}:/root/.cache/nix" \
|
||||
--workdir /src \
|
||||
--workdir /opt/src/pkgmgr \
|
||||
-e REINSTALL_PKGMGR=1 \
|
||||
-e TEST_PATTERN="${TEST_PATTERN}" \
|
||||
-e NIX_CONFIG="${NIX_CONFIG}" \
|
||||
"pkgmgr-${PKGMGR_DISTRO}" \
|
||||
bash -lc '
|
||||
set -e;
|
||||
git config --global --add safe.directory /src || true;
|
||||
git config --global --add safe.directory /opt/src/pkgmgr || true;
|
||||
nix develop .#default --no-write-lock-file -c \
|
||||
python3 -m unittest discover \
|
||||
-s tests/integration \
|
||||
-t /src \
|
||||
-t /opt/src/pkgmgr \
|
||||
-p "$TEST_PATTERN";
|
||||
'
|
||||
|
||||
@@ -6,19 +6,20 @@ echo ">>> Running UNIT tests in ${PKGMGR_DISTRO} container"
|
||||
echo "============================================================"
|
||||
|
||||
docker run --rm \
|
||||
-v "$(pwd):/src" \
|
||||
-v "$(pwd):/opt/src/pkgmgr" \
|
||||
-v "pkgmgr_nix_cache_${PKGMGR_DISTRO}:/root/.cache/nix" \
|
||||
-v "pkgmgr_nix_store_${PKGMGR_DISTRO}:/nix" \
|
||||
--workdir /src \
|
||||
--workdir /opt/src/pkgmgr \
|
||||
-e REINSTALL_PKGMGR=1 \
|
||||
-e TEST_PATTERN="${TEST_PATTERN}" \
|
||||
-e NIX_CONFIG="${NIX_CONFIG}" \
|
||||
"pkgmgr-${PKGMGR_DISTRO}" \
|
||||
bash -lc '
|
||||
set -e;
|
||||
git config --global --add safe.directory /src || true;
|
||||
git config --global --add safe.directory /opt/src/pkgmgr || true;
|
||||
nix develop .#default --no-write-lock-file -c \
|
||||
python3 -m unittest discover \
|
||||
-s tests/unit \
|
||||
-t /src \
|
||||
-t /opt/src/pkgmgr \
|
||||
-p "$TEST_PATTERN";
|
||||
'
|
||||
|
||||
@@ -25,12 +25,12 @@ __all__ = ["cli"]
|
||||
|
||||
|
||||
def __getattr__(name: str) -> Any:
|
||||
"""
|
||||
Lazily expose ``pkgmgr.cli`` as attribute on the top-level package.
|
||||
"""
|
||||
Lazily expose ``pkgmgr.cli`` as attribute on the top-level package.
|
||||
|
||||
This keeps ``import pkgmgr`` lightweight while still allowing
|
||||
``from pkgmgr import cli`` in tests and entry points.
|
||||
"""
|
||||
if name == "cli":
|
||||
return import_module("pkgmgr.cli")
|
||||
raise AttributeError(f"module 'pkgmgr' has no attribute {name!r}")
|
||||
This keeps ``import pkgmgr`` lightweight while still allowing
|
||||
``from pkgmgr import cli`` in tests and entry points.
|
||||
"""
|
||||
if name == "cli":
|
||||
return import_module("pkgmgr.cli")
|
||||
raise AttributeError(f"module 'pkgmgr' has no attribute {name!r}")
|
||||
|
||||
@@ -0,0 +1,6 @@
|
||||
from __future__ import annotations
|
||||
|
||||
# expose subpackages for patch() / resolve_name() friendliness
|
||||
from . import release as release # noqa: F401
|
||||
|
||||
__all__ = ["release"]
|
||||
|
||||
@@ -1,7 +1,21 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import Optional
|
||||
from pkgmgr.core.git import run_git, GitError, get_current_branch
|
||||
from .utils import _resolve_base_branch
|
||||
|
||||
from pkgmgr.core.git.errors import GitRunError
|
||||
from pkgmgr.core.git.queries import get_current_branch
|
||||
from pkgmgr.core.git.commands import (
|
||||
GitDeleteRemoteBranchError,
|
||||
checkout,
|
||||
delete_local_branch,
|
||||
delete_remote_branch,
|
||||
fetch,
|
||||
merge_no_ff,
|
||||
pull,
|
||||
push,
|
||||
)
|
||||
|
||||
from pkgmgr.core.git.queries import resolve_base_branch
|
||||
|
||||
|
||||
def close_branch(
|
||||
@@ -14,18 +28,17 @@ def close_branch(
|
||||
"""
|
||||
Merge a feature branch into the base branch and delete it afterwards.
|
||||
"""
|
||||
|
||||
# Determine branch name
|
||||
if not name:
|
||||
try:
|
||||
name = get_current_branch(cwd=cwd)
|
||||
except GitError as exc:
|
||||
except GitRunError as exc:
|
||||
raise RuntimeError(f"Failed to detect current branch: {exc}") from exc
|
||||
|
||||
if not name:
|
||||
raise RuntimeError("Branch name must not be empty.")
|
||||
|
||||
target_base = _resolve_base_branch(base_branch, fallback_base, cwd=cwd)
|
||||
target_base = resolve_base_branch(base_branch, fallback_base, cwd=cwd)
|
||||
|
||||
if name == target_base:
|
||||
raise RuntimeError(
|
||||
@@ -35,65 +48,31 @@ def close_branch(
|
||||
|
||||
# Confirmation
|
||||
if not force:
|
||||
answer = input(
|
||||
f"Merge branch '{name}' into '{target_base}' and delete it afterwards? (y/N): "
|
||||
).strip().lower()
|
||||
answer = (
|
||||
input(
|
||||
f"Merge branch '{name}' into '{target_base}' and delete it afterwards? (y/N): "
|
||||
)
|
||||
.strip()
|
||||
.lower()
|
||||
)
|
||||
if answer != "y":
|
||||
print("Aborted closing branch.")
|
||||
return
|
||||
|
||||
# Fetch
|
||||
try:
|
||||
run_git(["fetch", "origin"], cwd=cwd)
|
||||
except GitError as exc:
|
||||
raise RuntimeError(
|
||||
f"Failed to fetch from origin before closing branch {name!r}: {exc}"
|
||||
) from exc
|
||||
# Execute workflow (commands raise specific GitRunError subclasses)
|
||||
fetch("origin", cwd=cwd)
|
||||
checkout(target_base, cwd=cwd)
|
||||
pull("origin", target_base, cwd=cwd)
|
||||
merge_no_ff(name, cwd=cwd)
|
||||
push("origin", target_base, cwd=cwd)
|
||||
|
||||
# Checkout base
|
||||
try:
|
||||
run_git(["checkout", target_base], cwd=cwd)
|
||||
except GitError as exc:
|
||||
raise RuntimeError(
|
||||
f"Failed to checkout base branch {target_base!r}: {exc}"
|
||||
) from exc
|
||||
# Delete local branch (safe delete by default)
|
||||
delete_local_branch(name, cwd=cwd, force=False)
|
||||
|
||||
# Pull latest
|
||||
# Delete remote branch (special-case error message)
|
||||
try:
|
||||
run_git(["pull", "origin", target_base], cwd=cwd)
|
||||
except GitError as exc:
|
||||
raise RuntimeError(
|
||||
f"Failed to pull latest changes for base branch {target_base!r}: {exc}"
|
||||
) from exc
|
||||
|
||||
# Merge
|
||||
try:
|
||||
run_git(["merge", "--no-ff", name], cwd=cwd)
|
||||
except GitError as exc:
|
||||
raise RuntimeError(
|
||||
f"Failed to merge branch {name!r} into {target_base!r}: {exc}"
|
||||
) from exc
|
||||
|
||||
# Push result
|
||||
try:
|
||||
run_git(["push", "origin", target_base], cwd=cwd)
|
||||
except GitError as exc:
|
||||
raise RuntimeError(
|
||||
f"Failed to push base branch {target_base!r} after merge: {exc}"
|
||||
) from exc
|
||||
|
||||
# Delete local
|
||||
try:
|
||||
run_git(["branch", "-d", name], cwd=cwd)
|
||||
except GitError as exc:
|
||||
raise RuntimeError(
|
||||
f"Failed to delete local branch {name!r}: {exc}"
|
||||
) from exc
|
||||
|
||||
# Delete remote
|
||||
try:
|
||||
run_git(["push", "origin", "--delete", name], cwd=cwd)
|
||||
except GitError as exc:
|
||||
delete_remote_branch("origin", name, cwd=cwd)
|
||||
except GitDeleteRemoteBranchError as exc:
|
||||
raise RuntimeError(
|
||||
f"Branch {name!r} deleted locally, but remote deletion failed: {exc}"
|
||||
) from exc
|
||||
|
||||
@@ -1,7 +1,16 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import Optional
|
||||
from pkgmgr.core.git import run_git, GitError, get_current_branch
|
||||
from .utils import _resolve_base_branch
|
||||
|
||||
from pkgmgr.core.git.errors import GitRunError
|
||||
from pkgmgr.core.git.queries import get_current_branch
|
||||
from pkgmgr.core.git.commands import (
|
||||
GitDeleteRemoteBranchError,
|
||||
delete_local_branch,
|
||||
delete_remote_branch,
|
||||
)
|
||||
|
||||
from pkgmgr.core.git.queries import resolve_base_branch
|
||||
|
||||
|
||||
def drop_branch(
|
||||
@@ -14,17 +23,16 @@ def drop_branch(
|
||||
"""
|
||||
Delete a branch locally and remotely without merging.
|
||||
"""
|
||||
|
||||
if not name:
|
||||
try:
|
||||
name = get_current_branch(cwd=cwd)
|
||||
except GitError as exc:
|
||||
except GitRunError as exc:
|
||||
raise RuntimeError(f"Failed to detect current branch: {exc}") from exc
|
||||
|
||||
if not name:
|
||||
raise RuntimeError("Branch name must not be empty.")
|
||||
|
||||
target_base = _resolve_base_branch(base_branch, fallback_base, cwd=cwd)
|
||||
target_base = resolve_base_branch(base_branch, fallback_base, cwd=cwd)
|
||||
|
||||
if name == target_base:
|
||||
raise RuntimeError(
|
||||
@@ -33,23 +41,23 @@ def drop_branch(
|
||||
|
||||
# Confirmation
|
||||
if not force:
|
||||
answer = input(
|
||||
f"Delete branch '{name}' locally and on origin? This is destructive! (y/N): "
|
||||
).strip().lower()
|
||||
answer = (
|
||||
input(
|
||||
f"Delete branch '{name}' locally and on origin? This is destructive! (y/N): "
|
||||
)
|
||||
.strip()
|
||||
.lower()
|
||||
)
|
||||
if answer != "y":
|
||||
print("Aborted dropping branch.")
|
||||
return
|
||||
|
||||
# Local delete
|
||||
try:
|
||||
run_git(["branch", "-d", name], cwd=cwd)
|
||||
except GitError as exc:
|
||||
raise RuntimeError(f"Failed to delete local branch {name!r}: {exc}") from exc
|
||||
delete_local_branch(name, cwd=cwd, force=False)
|
||||
|
||||
# Remote delete
|
||||
# Remote delete (special-case message)
|
||||
try:
|
||||
run_git(["push", "origin", "--delete", name], cwd=cwd)
|
||||
except GitError as exc:
|
||||
delete_remote_branch("origin", name, cwd=cwd)
|
||||
except GitDeleteRemoteBranchError as exc:
|
||||
raise RuntimeError(
|
||||
f"Branch {name!r} was deleted locally, but remote deletion failed: {exc}"
|
||||
) from exc
|
||||
|
||||
@@ -1,7 +1,15 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import Optional
|
||||
from pkgmgr.core.git import run_git, GitError
|
||||
from .utils import _resolve_base_branch
|
||||
|
||||
from pkgmgr.core.git.commands import (
|
||||
checkout,
|
||||
create_branch,
|
||||
fetch,
|
||||
pull,
|
||||
push_upstream,
|
||||
)
|
||||
from pkgmgr.core.git.queries import resolve_base_branch
|
||||
|
||||
|
||||
def open_branch(
|
||||
@@ -13,7 +21,6 @@ def open_branch(
|
||||
"""
|
||||
Create and push a new feature branch on top of a base branch.
|
||||
"""
|
||||
|
||||
# Request name interactively if not provided
|
||||
if not name:
|
||||
name = input("Enter new branch name: ").strip()
|
||||
@@ -21,44 +28,13 @@ def open_branch(
|
||||
if not name:
|
||||
raise RuntimeError("Branch name must not be empty.")
|
||||
|
||||
resolved_base = _resolve_base_branch(base_branch, fallback_base, cwd=cwd)
|
||||
resolved_base = resolve_base_branch(base_branch, fallback_base, cwd=cwd)
|
||||
|
||||
# 1) Fetch from origin
|
||||
try:
|
||||
run_git(["fetch", "origin"], cwd=cwd)
|
||||
except GitError as exc:
|
||||
raise RuntimeError(
|
||||
f"Failed to fetch from origin before creating branch {name!r}: {exc}"
|
||||
) from exc
|
||||
# Workflow (commands raise specific GitBaseError subclasses)
|
||||
fetch("origin", cwd=cwd)
|
||||
checkout(resolved_base, cwd=cwd)
|
||||
pull("origin", resolved_base, cwd=cwd)
|
||||
|
||||
# 2) Checkout base branch
|
||||
try:
|
||||
run_git(["checkout", resolved_base], cwd=cwd)
|
||||
except GitError as exc:
|
||||
raise RuntimeError(
|
||||
f"Failed to checkout base branch {resolved_base!r}: {exc}"
|
||||
) from exc
|
||||
|
||||
# 3) Pull latest changes
|
||||
try:
|
||||
run_git(["pull", "origin", resolved_base], cwd=cwd)
|
||||
except GitError as exc:
|
||||
raise RuntimeError(
|
||||
f"Failed to pull latest changes for base branch {resolved_base!r}: {exc}"
|
||||
) from exc
|
||||
|
||||
# 4) Create new branch
|
||||
try:
|
||||
run_git(["checkout", "-b", name], cwd=cwd)
|
||||
except GitError as exc:
|
||||
raise RuntimeError(
|
||||
f"Failed to create new branch {name!r} from base {resolved_base!r}: {exc}"
|
||||
) from exc
|
||||
|
||||
# 5) Push new branch
|
||||
try:
|
||||
run_git(["push", "-u", "origin", name], cwd=cwd)
|
||||
except GitError as exc:
|
||||
raise RuntimeError(
|
||||
f"Failed to push new branch {name!r} to origin: {exc}"
|
||||
) from exc
|
||||
# Create new branch from resolved base and push it with upstream tracking
|
||||
create_branch(name, resolved_base, cwd=cwd)
|
||||
push_upstream("origin", name, cwd=cwd)
|
||||
|
||||
@@ -1,27 +0,0 @@
|
||||
from __future__ import annotations
|
||||
from pkgmgr.core.git import run_git, GitError
|
||||
|
||||
|
||||
def _resolve_base_branch(
|
||||
preferred: str,
|
||||
fallback: str,
|
||||
cwd: str,
|
||||
) -> str:
|
||||
"""
|
||||
Resolve the base branch to use.
|
||||
|
||||
Try `preferred` first (default: main),
|
||||
fall back to `fallback` (default: master).
|
||||
|
||||
Raise RuntimeError if neither exists.
|
||||
"""
|
||||
for candidate in (preferred, fallback):
|
||||
try:
|
||||
run_git(["rev-parse", "--verify", candidate], cwd=cwd)
|
||||
return candidate
|
||||
except GitError:
|
||||
continue
|
||||
|
||||
raise RuntimeError(
|
||||
f"Neither {preferred!r} nor {fallback!r} exist in this repository."
|
||||
)
|
||||
@@ -3,17 +3,16 @@
|
||||
|
||||
"""
|
||||
Helpers to generate changelog information from Git history.
|
||||
|
||||
This module provides a small abstraction around `git log` so that
|
||||
CLI commands can request a changelog between two refs (tags, branches,
|
||||
commits) without dealing with raw subprocess calls.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import Optional
|
||||
|
||||
from pkgmgr.core.git import run_git, GitError
|
||||
from pkgmgr.core.git.queries import (
|
||||
get_changelog,
|
||||
GitChangelogQueryError,
|
||||
)
|
||||
|
||||
|
||||
def generate_changelog(
|
||||
@@ -25,48 +24,20 @@ def generate_changelog(
|
||||
"""
|
||||
Generate a plain-text changelog between two Git refs.
|
||||
|
||||
Parameters
|
||||
----------
|
||||
cwd:
|
||||
Repository directory in which to run Git commands.
|
||||
from_ref:
|
||||
Optional starting reference (exclusive). If provided together
|
||||
with `to_ref`, the range `from_ref..to_ref` is used.
|
||||
If only `from_ref` is given, the range `from_ref..HEAD` is used.
|
||||
to_ref:
|
||||
Optional end reference (inclusive). If omitted, `HEAD` is used.
|
||||
include_merges:
|
||||
If False (default), merge commits are filtered out.
|
||||
|
||||
Returns
|
||||
-------
|
||||
str
|
||||
The output of `git log` formatted as a simple text changelog.
|
||||
If no commits are found or Git fails, an explanatory message
|
||||
is returned instead of raising.
|
||||
Returns a human-readable message instead of raising.
|
||||
"""
|
||||
# Determine the revision range
|
||||
if to_ref is None:
|
||||
to_ref = "HEAD"
|
||||
|
||||
if from_ref:
|
||||
rev_range = f"{from_ref}..{to_ref}"
|
||||
else:
|
||||
rev_range = to_ref
|
||||
|
||||
# Use a custom pretty format that includes tags/refs (%d)
|
||||
cmd = [
|
||||
"log",
|
||||
"--pretty=format:%h %d %s",
|
||||
]
|
||||
if not include_merges:
|
||||
cmd.append("--no-merges")
|
||||
cmd.append(rev_range)
|
||||
|
||||
rev_range = f"{from_ref}..{to_ref}" if from_ref else to_ref
|
||||
try:
|
||||
output = run_git(cmd, cwd=cwd)
|
||||
except GitError as exc:
|
||||
# Do not raise to the CLI, return a human-readable error instead.
|
||||
output = get_changelog(
|
||||
cwd=cwd,
|
||||
from_ref=from_ref,
|
||||
to_ref=to_ref,
|
||||
include_merges=include_merges,
|
||||
)
|
||||
except GitChangelogQueryError as exc:
|
||||
return (
|
||||
f"[ERROR] Failed to generate changelog in {cwd!r} "
|
||||
f"for range {rev_range!r}:\n{exc}"
|
||||
|
||||
@@ -1,15 +1,18 @@
|
||||
import yaml
|
||||
import os
|
||||
from pkgmgr.core.config.save import save_user_config
|
||||
from pkgmgr.core.config.save import save_user_config
|
||||
|
||||
def interactive_add(config,USER_CONFIG_PATH:str):
|
||||
|
||||
def interactive_add(config, USER_CONFIG_PATH: str):
|
||||
"""Interactively prompt the user to add a new repository entry to the user config."""
|
||||
print("Adding a new repository configuration entry.")
|
||||
new_entry = {}
|
||||
new_entry["provider"] = input("Provider (e.g., github.com): ").strip()
|
||||
new_entry["account"] = input("Account (e.g., yourusername): ").strip()
|
||||
new_entry["repository"] = input("Repository name (e.g., mytool): ").strip()
|
||||
new_entry["command"] = input("Command (optional, leave blank to auto-detect): ").strip()
|
||||
new_entry["command"] = input(
|
||||
"Command (optional, leave blank to auto-detect): "
|
||||
).strip()
|
||||
new_entry["description"] = input("Description (optional): ").strip()
|
||||
new_entry["replacement"] = input("Replacement (optional): ").strip()
|
||||
new_entry["alias"] = input("Alias (optional): ").strip()
|
||||
@@ -25,12 +28,12 @@ def interactive_add(config,USER_CONFIG_PATH:str):
|
||||
confirm = input("Add this entry to user config? (y/N): ").strip().lower()
|
||||
if confirm == "y":
|
||||
if os.path.exists(USER_CONFIG_PATH):
|
||||
with open(USER_CONFIG_PATH, 'r') as f:
|
||||
with open(USER_CONFIG_PATH, "r") as f:
|
||||
user_config = yaml.safe_load(f) or {}
|
||||
else:
|
||||
user_config = {"repositories": []}
|
||||
user_config.setdefault("repositories", [])
|
||||
user_config["repositories"].append(new_entry)
|
||||
save_user_config(user_config,USER_CONFIG_PATH)
|
||||
save_user_config(user_config, USER_CONFIG_PATH)
|
||||
else:
|
||||
print("Entry not added.")
|
||||
print("Entry not added.")
|
||||
|
||||
@@ -14,7 +14,7 @@ with the expected structure:
|
||||
|
||||
For each discovered repository, the function:
|
||||
• derives provider, account, repository from the folder structure
|
||||
• (optionally) determines the latest commit hash via git log
|
||||
• (optionally) determines the latest commit hash via git
|
||||
• generates a unique CLI alias
|
||||
• marks ignore=True for newly discovered repos
|
||||
• skips repos already known in defaults or user config
|
||||
@@ -23,11 +23,11 @@ For each discovered repository, the function:
|
||||
from __future__ import annotations
|
||||
|
||||
import os
|
||||
import subprocess
|
||||
from typing import Any, Dict
|
||||
|
||||
from pkgmgr.core.command.alias import generate_alias
|
||||
from pkgmgr.core.config.save import save_user_config
|
||||
from pkgmgr.core.git.queries import get_latest_commit
|
||||
|
||||
|
||||
def config_init(
|
||||
@@ -107,36 +107,33 @@ def config_init(
|
||||
# Already known?
|
||||
if key in default_keys:
|
||||
skipped += 1
|
||||
print(f"[SKIP] (defaults) {provider}/{account}/{repo_name}")
|
||||
print(
|
||||
f"[SKIP] (defaults) {provider}/{account}/{repo_name}"
|
||||
)
|
||||
continue
|
||||
if key in existing_keys:
|
||||
skipped += 1
|
||||
print(f"[SKIP] (user-config) {provider}/{account}/{repo_name}")
|
||||
print(
|
||||
f"[SKIP] (user-config) {provider}/{account}/{repo_name}"
|
||||
)
|
||||
continue
|
||||
|
||||
print(f"[ADD] {provider}/{account}/{repo_name}")
|
||||
|
||||
# Determine commit hash
|
||||
try:
|
||||
result = subprocess.run(
|
||||
["git", "log", "-1", "--format=%H"],
|
||||
cwd=repo_path,
|
||||
stdout=subprocess.PIPE,
|
||||
stderr=subprocess.PIPE,
|
||||
text=True,
|
||||
check=True,
|
||||
# Determine commit hash via git query
|
||||
verified_commit = get_latest_commit(repo_path) or ""
|
||||
if verified_commit:
|
||||
print(f"[INFO] Latest commit: {verified_commit}")
|
||||
else:
|
||||
print(
|
||||
"[WARN] Could not read commit (not a git repo or no commits)."
|
||||
)
|
||||
verified = result.stdout.strip()
|
||||
print(f"[INFO] Latest commit: {verified}")
|
||||
except Exception as exc:
|
||||
verified = ""
|
||||
print(f"[WARN] Could not read commit: {exc}")
|
||||
|
||||
entry = {
|
||||
entry: Dict[str, Any] = {
|
||||
"provider": provider,
|
||||
"account": account,
|
||||
"repository": repo_name,
|
||||
"verified": {"commit": verified},
|
||||
"verified": {"commit": verified_commit},
|
||||
"ignore": True,
|
||||
}
|
||||
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
import yaml
|
||||
from pkgmgr.core.config.load import load_config
|
||||
|
||||
|
||||
def show_config(selected_repos, user_config_path, full_config=False):
|
||||
"""Display configuration for one or more repositories, or the entire merged config."""
|
||||
if full_config:
|
||||
@@ -8,8 +9,10 @@ def show_config(selected_repos, user_config_path, full_config=False):
|
||||
print(yaml.dump(merged, default_flow_style=False))
|
||||
else:
|
||||
for repo in selected_repos:
|
||||
identifier = f'{repo.get("provider")}/{repo.get("account")}/{repo.get("repository")}'
|
||||
identifier = (
|
||||
f"{repo.get('provider')}/{repo.get('account')}/{repo.get('repository')}"
|
||||
)
|
||||
print(f"Repository: {identifier}")
|
||||
for key, value in repo.items():
|
||||
print(f" {key}: {value}")
|
||||
print("-" * 40)
|
||||
print("-" * 40)
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
# src/pkgmgr/actions/install/__init__.py
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
@@ -15,7 +16,7 @@ Responsibilities:
|
||||
from __future__ import annotations
|
||||
|
||||
import os
|
||||
from typing import Any, Dict, List, Optional
|
||||
from typing import Any, Dict, List, Optional, Tuple
|
||||
|
||||
from pkgmgr.core.repository.identifier import get_repo_identifier
|
||||
from pkgmgr.core.repository.dir import get_repo_dir
|
||||
@@ -27,7 +28,7 @@ from pkgmgr.actions.install.installers.os_packages import (
|
||||
DebianControlInstaller,
|
||||
RpmSpecInstaller,
|
||||
)
|
||||
from pkgmgr.actions.install.installers.nix_flake import (
|
||||
from pkgmgr.actions.install.installers.nix import (
|
||||
NixFlakeInstaller,
|
||||
)
|
||||
from pkgmgr.actions.install.installers.python import PythonInstaller
|
||||
@@ -36,10 +37,8 @@ from pkgmgr.actions.install.installers.makefile import (
|
||||
)
|
||||
from pkgmgr.actions.install.pipeline import InstallationPipeline
|
||||
|
||||
|
||||
Repository = Dict[str, Any]
|
||||
|
||||
# All available installers, in the order they should be considered.
|
||||
INSTALLERS = [
|
||||
ArchPkgbuildInstaller(),
|
||||
DebianControlInstaller(),
|
||||
@@ -50,11 +49,6 @@ INSTALLERS = [
|
||||
]
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Internal helpers
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
def _ensure_repo_dir(
|
||||
repo: Repository,
|
||||
repositories_base_dir: str,
|
||||
@@ -72,10 +66,7 @@ def _ensure_repo_dir(
|
||||
repo_dir = get_repo_dir(repositories_base_dir, repo)
|
||||
|
||||
if not os.path.exists(repo_dir):
|
||||
print(
|
||||
f"Repository directory '{repo_dir}' does not exist. "
|
||||
"Cloning it now..."
|
||||
)
|
||||
print(f"Repository directory '{repo_dir}' does not exist. Cloning it now...")
|
||||
clone_repos(
|
||||
[repo],
|
||||
repositories_base_dir,
|
||||
@@ -85,10 +76,7 @@ def _ensure_repo_dir(
|
||||
clone_mode,
|
||||
)
|
||||
if not os.path.exists(repo_dir):
|
||||
print(
|
||||
f"Cloning failed for repository {identifier}. "
|
||||
"Skipping installation."
|
||||
)
|
||||
print(f"Cloning failed for repository {identifier}. Skipping installation.")
|
||||
return None
|
||||
|
||||
return repo_dir
|
||||
@@ -99,6 +87,7 @@ def _verify_repo(
|
||||
repo_dir: str,
|
||||
no_verification: bool,
|
||||
identifier: str,
|
||||
silent: bool,
|
||||
) -> bool:
|
||||
"""
|
||||
Verify a repository using the configured verification data.
|
||||
@@ -117,10 +106,17 @@ def _verify_repo(
|
||||
print(f"Warning: Verification failed for {identifier}:")
|
||||
for err in errors:
|
||||
print(f" - {err}")
|
||||
choice = input("Continue anyway? [y/N]: ").strip().lower()
|
||||
if choice != "y":
|
||||
print(f"Skipping installation for {identifier}.")
|
||||
return False
|
||||
|
||||
if silent:
|
||||
# Non-interactive mode: continue with a warning.
|
||||
print(
|
||||
f"[Warning] Continuing despite verification failure for {identifier} (--silent)."
|
||||
)
|
||||
else:
|
||||
choice = input("Continue anyway? [y/N]: ").strip().lower()
|
||||
if choice != "y":
|
||||
print(f"Skipping installation for {identifier}.")
|
||||
return False
|
||||
|
||||
return True
|
||||
|
||||
@@ -137,6 +133,7 @@ def _create_context(
|
||||
quiet: bool,
|
||||
clone_mode: str,
|
||||
update_dependencies: bool,
|
||||
force_update: bool,
|
||||
) -> RepoContext:
|
||||
"""
|
||||
Build a RepoContext instance for the given repository.
|
||||
@@ -153,14 +150,10 @@ def _create_context(
|
||||
quiet=quiet,
|
||||
clone_mode=clone_mode,
|
||||
update_dependencies=update_dependencies,
|
||||
force_update=force_update,
|
||||
)
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Public API
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
def install_repos(
|
||||
selected_repos: List[Repository],
|
||||
repositories_base_dir: str,
|
||||
@@ -171,48 +164,86 @@ def install_repos(
|
||||
quiet: bool,
|
||||
clone_mode: str,
|
||||
update_dependencies: bool,
|
||||
force_update: bool = False,
|
||||
silent: bool = False,
|
||||
emit_summary: bool = True,
|
||||
) -> None:
|
||||
"""
|
||||
Install one or more repositories according to the configured installers
|
||||
and the CLI layer precedence rules.
|
||||
|
||||
If force_update=True, installers of the currently active layer are allowed
|
||||
to run again (upgrade/refresh), even if that layer is already loaded.
|
||||
|
||||
If silent=True, repository failures are downgraded to warnings and the
|
||||
overall command never exits non-zero because of per-repository failures.
|
||||
"""
|
||||
pipeline = InstallationPipeline(INSTALLERS)
|
||||
failures: List[Tuple[str, str]] = []
|
||||
|
||||
for repo in selected_repos:
|
||||
identifier = get_repo_identifier(repo, all_repos)
|
||||
|
||||
repo_dir = _ensure_repo_dir(
|
||||
repo=repo,
|
||||
repositories_base_dir=repositories_base_dir,
|
||||
all_repos=all_repos,
|
||||
preview=preview,
|
||||
no_verification=no_verification,
|
||||
clone_mode=clone_mode,
|
||||
identifier=identifier,
|
||||
)
|
||||
if not repo_dir:
|
||||
try:
|
||||
repo_dir = _ensure_repo_dir(
|
||||
repo=repo,
|
||||
repositories_base_dir=repositories_base_dir,
|
||||
all_repos=all_repos,
|
||||
preview=preview,
|
||||
no_verification=no_verification,
|
||||
clone_mode=clone_mode,
|
||||
identifier=identifier,
|
||||
)
|
||||
if not repo_dir:
|
||||
failures.append((identifier, "clone/ensure repo directory failed"))
|
||||
continue
|
||||
|
||||
if not _verify_repo(
|
||||
repo=repo,
|
||||
repo_dir=repo_dir,
|
||||
no_verification=no_verification,
|
||||
identifier=identifier,
|
||||
silent=silent,
|
||||
):
|
||||
continue
|
||||
|
||||
ctx = _create_context(
|
||||
repo=repo,
|
||||
identifier=identifier,
|
||||
repo_dir=repo_dir,
|
||||
repositories_base_dir=repositories_base_dir,
|
||||
bin_dir=bin_dir,
|
||||
all_repos=all_repos,
|
||||
no_verification=no_verification,
|
||||
preview=preview,
|
||||
quiet=quiet,
|
||||
clone_mode=clone_mode,
|
||||
update_dependencies=update_dependencies,
|
||||
force_update=force_update,
|
||||
)
|
||||
|
||||
pipeline.run(ctx)
|
||||
|
||||
except SystemExit as exc:
|
||||
code = exc.code if isinstance(exc.code, int) else str(exc.code)
|
||||
failures.append((identifier, f"installer failed (exit={code})"))
|
||||
if not quiet:
|
||||
print(
|
||||
f"[Warning] install: repository {identifier} failed (exit={code}). Continuing..."
|
||||
)
|
||||
continue
|
||||
except Exception as exc:
|
||||
failures.append((identifier, f"unexpected error: {exc}"))
|
||||
if not quiet:
|
||||
print(
|
||||
f"[Warning] install: repository {identifier} hit an unexpected error: {exc}. Continuing..."
|
||||
)
|
||||
continue
|
||||
|
||||
if not _verify_repo(
|
||||
repo=repo,
|
||||
repo_dir=repo_dir,
|
||||
no_verification=no_verification,
|
||||
identifier=identifier,
|
||||
):
|
||||
continue
|
||||
if failures and emit_summary and not quiet:
|
||||
print("\n[pkgmgr] Installation finished with warnings:")
|
||||
for ident, msg in failures:
|
||||
print(f" - {ident}: {msg}")
|
||||
|
||||
ctx = _create_context(
|
||||
repo=repo,
|
||||
identifier=identifier,
|
||||
repo_dir=repo_dir,
|
||||
repositories_base_dir=repositories_base_dir,
|
||||
bin_dir=bin_dir,
|
||||
all_repos=all_repos,
|
||||
no_verification=no_verification,
|
||||
preview=preview,
|
||||
quiet=quiet,
|
||||
clone_mode=clone_mode,
|
||||
update_dependencies=update_dependencies,
|
||||
)
|
||||
|
||||
pipeline.run(ctx)
|
||||
if failures and not silent:
|
||||
raise SystemExit(1)
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
# src/pkgmgr/actions/install/context.py
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
@@ -28,3 +29,6 @@ class RepoContext:
|
||||
quiet: bool
|
||||
clone_mode: str
|
||||
update_dependencies: bool
|
||||
|
||||
# If True, allow re-running installers of the currently active layer.
|
||||
force_update: bool = False
|
||||
|
||||
@@ -9,11 +9,15 @@ pkgmgr.actions.install.installers.
|
||||
"""
|
||||
|
||||
from pkgmgr.actions.install.installers.base import BaseInstaller # noqa: F401
|
||||
from pkgmgr.actions.install.installers.nix_flake import NixFlakeInstaller # noqa: F401
|
||||
from pkgmgr.actions.install.installers.nix import NixFlakeInstaller # noqa: F401
|
||||
from pkgmgr.actions.install.installers.python import PythonInstaller # noqa: F401
|
||||
from pkgmgr.actions.install.installers.makefile import MakefileInstaller # noqa: F401
|
||||
|
||||
# OS-specific installers
|
||||
from pkgmgr.actions.install.installers.os_packages.arch_pkgbuild import ArchPkgbuildInstaller # noqa: F401
|
||||
from pkgmgr.actions.install.installers.os_packages.debian_control import DebianControlInstaller # noqa: F401
|
||||
from pkgmgr.actions.install.installers.os_packages.arch_pkgbuild import (
|
||||
ArchPkgbuildInstaller as ArchPkgbuildInstaller,
|
||||
) # noqa: F401
|
||||
from pkgmgr.actions.install.installers.os_packages.debian_control import (
|
||||
DebianControlInstaller as DebianControlInstaller,
|
||||
) # noqa: F401
|
||||
from pkgmgr.actions.install.installers.os_packages.rpm_spec import RpmSpecInstaller # noqa: F401
|
||||
|
||||
@@ -41,7 +41,9 @@ class BaseInstaller(ABC):
|
||||
return caps
|
||||
|
||||
for matcher in CAPABILITY_MATCHERS:
|
||||
if matcher.applies_to_layer(self.layer) and matcher.is_provided(ctx, self.layer):
|
||||
if matcher.applies_to_layer(self.layer) and matcher.is_provided(
|
||||
ctx, self.layer
|
||||
):
|
||||
caps.add(matcher.name)
|
||||
|
||||
return caps
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
# src/pkgmgr/actions/install/installers/makefile.py
|
||||
from __future__ import annotations
|
||||
|
||||
import os
|
||||
@@ -9,32 +10,14 @@ from pkgmgr.core.command.run import run_command
|
||||
|
||||
|
||||
class MakefileInstaller(BaseInstaller):
|
||||
"""
|
||||
Generic installer that runs `make install` if a Makefile with an
|
||||
install target is present.
|
||||
|
||||
Safety rules:
|
||||
- If PKGMGR_DISABLE_MAKEFILE_INSTALLER=1 is set, this installer
|
||||
is globally disabled.
|
||||
- The higher-level InstallationPipeline ensures that Makefile
|
||||
installation does not run if a stronger CLI layer already owns
|
||||
the command (e.g. Nix or OS packages).
|
||||
"""
|
||||
|
||||
layer = "makefile"
|
||||
MAKEFILE_NAME = "Makefile"
|
||||
|
||||
def supports(self, ctx: RepoContext) -> bool:
|
||||
"""
|
||||
Return True if this repository has a Makefile and the installer
|
||||
is not globally disabled.
|
||||
"""
|
||||
# Optional global kill switch.
|
||||
if os.environ.get("PKGMGR_DISABLE_MAKEFILE_INSTALLER") == "1":
|
||||
if not ctx.quiet:
|
||||
print(
|
||||
"[INFO] MakefileInstaller is disabled via "
|
||||
"PKGMGR_DISABLE_MAKEFILE_INSTALLER."
|
||||
"[INFO] PKGMGR_DISABLE_MAKEFILE_INSTALLER=1 – skipping MakefileInstaller."
|
||||
)
|
||||
return False
|
||||
|
||||
@@ -42,56 +25,34 @@ class MakefileInstaller(BaseInstaller):
|
||||
return os.path.exists(makefile_path)
|
||||
|
||||
def _has_install_target(self, makefile_path: str) -> bool:
|
||||
"""
|
||||
Heuristically check whether the Makefile defines an install target.
|
||||
|
||||
We look for:
|
||||
|
||||
- a plain 'install:' target, or
|
||||
- any 'install-*:' style target.
|
||||
"""
|
||||
try:
|
||||
with open(makefile_path, "r", encoding="utf-8", errors="ignore") as f:
|
||||
content = f.read()
|
||||
except OSError:
|
||||
return False
|
||||
|
||||
# Simple heuristics: look for "install:" or targets starting with "install-"
|
||||
if re.search(r"^install\s*:", content, flags=re.MULTILINE):
|
||||
return True
|
||||
|
||||
if re.search(r"^install-[a-zA-Z0-9_-]*\s*:", content, flags=re.MULTILINE):
|
||||
return True
|
||||
|
||||
return False
|
||||
|
||||
def run(self, ctx: RepoContext) -> None:
|
||||
"""
|
||||
Execute `make install` in the repository directory if an install
|
||||
target exists.
|
||||
"""
|
||||
makefile_path = os.path.join(ctx.repo_dir, self.MAKEFILE_NAME)
|
||||
|
||||
if not os.path.exists(makefile_path):
|
||||
if not ctx.quiet:
|
||||
print(
|
||||
f"[pkgmgr] Makefile '{makefile_path}' not found, "
|
||||
"skipping MakefileInstaller."
|
||||
)
|
||||
return
|
||||
|
||||
if not self._has_install_target(makefile_path):
|
||||
if not ctx.quiet:
|
||||
print(
|
||||
f"[pkgmgr] No 'install' target found in {makefile_path}."
|
||||
)
|
||||
print(f"[pkgmgr] No 'install' target found in {makefile_path}.")
|
||||
return
|
||||
|
||||
if not ctx.quiet:
|
||||
print(
|
||||
f"[pkgmgr] Running 'make install' in {ctx.repo_dir} "
|
||||
"(MakefileInstaller)"
|
||||
f"[pkgmgr] Running make install for {ctx.identifier} (MakefileInstaller)"
|
||||
)
|
||||
|
||||
cmd = "make install"
|
||||
run_command(cmd, cwd=ctx.repo_dir, preview=ctx.preview)
|
||||
run_command("make install", cwd=ctx.repo_dir, preview=ctx.preview)
|
||||
|
||||
if ctx.force_update and not ctx.quiet:
|
||||
print(f"[makefile] repo '{ctx.identifier}' successfully upgraded.")
|
||||
|
||||
4
src/pkgmgr/actions/install/installers/nix/__init__.py
Normal file
4
src/pkgmgr/actions/install/installers/nix/__init__.py
Normal file
@@ -0,0 +1,4 @@
|
||||
from .installer import NixFlakeInstaller
|
||||
from .retry import RetryPolicy
|
||||
|
||||
__all__ = ["NixFlakeInstaller", "RetryPolicy"]
|
||||
104
src/pkgmgr/actions/install/installers/nix/conflicts.py
Normal file
104
src/pkgmgr/actions/install/installers/nix/conflicts.py
Normal file
@@ -0,0 +1,104 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import TYPE_CHECKING, List
|
||||
|
||||
from .profile import NixProfileInspector
|
||||
from .retry import GitHubRateLimitRetry
|
||||
from .runner import CommandRunner
|
||||
from .textparse import NixConflictTextParser
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from pkgmgr.actions.install.context import RepoContext
|
||||
|
||||
|
||||
class NixConflictResolver:
|
||||
"""
|
||||
Resolves nix profile file conflicts by:
|
||||
1. Parsing conflicting store paths from stderr
|
||||
2. Mapping them to profile remove tokens via `nix profile list --json`
|
||||
3. Removing those tokens deterministically
|
||||
4. Retrying install
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
runner: CommandRunner,
|
||||
retry: GitHubRateLimitRetry,
|
||||
profile: NixProfileInspector,
|
||||
) -> None:
|
||||
self._runner = runner
|
||||
self._retry = retry
|
||||
self._profile = profile
|
||||
self._parser = NixConflictTextParser()
|
||||
|
||||
def resolve(
|
||||
self,
|
||||
ctx: "RepoContext",
|
||||
install_cmd: str,
|
||||
stdout: str,
|
||||
stderr: str,
|
||||
*,
|
||||
output: str,
|
||||
max_rounds: int = 10,
|
||||
) -> bool:
|
||||
quiet = bool(getattr(ctx, "quiet", False))
|
||||
combined = f"{stdout}\n{stderr}"
|
||||
|
||||
for _ in range(max_rounds):
|
||||
# 1) Extract conflicting store prefixes from nix error output
|
||||
store_prefixes = self._parser.existing_store_prefixes(combined)
|
||||
|
||||
# 2) Resolve them to concrete remove tokens
|
||||
tokens: List[str] = self._profile.find_remove_tokens_for_store_prefixes(
|
||||
ctx,
|
||||
self._runner,
|
||||
store_prefixes,
|
||||
)
|
||||
|
||||
# 3) Fallback: output-name based lookup (also covers nix suggesting: `nix profile remove pkgmgr`)
|
||||
if not tokens:
|
||||
tokens = self._profile.find_remove_tokens_for_output(
|
||||
ctx, self._runner, output
|
||||
)
|
||||
|
||||
if tokens:
|
||||
if not quiet:
|
||||
print(
|
||||
"[nix] conflict detected; removing existing profile entries: "
|
||||
+ ", ".join(tokens)
|
||||
)
|
||||
|
||||
for t in tokens:
|
||||
# tokens may contain things like "pkgmgr" or "pkgmgr-1" or quoted tokens (we keep raw)
|
||||
self._runner.run(ctx, f"nix profile remove {t}", allow_failure=True)
|
||||
|
||||
res = self._retry.run_with_retry(ctx, self._runner, install_cmd)
|
||||
if res.returncode == 0:
|
||||
return True
|
||||
|
||||
combined = f"{res.stdout}\n{res.stderr}"
|
||||
continue
|
||||
|
||||
# 4) Last-resort fallback: use textual remove tokens from stderr (“nix profile remove X”)
|
||||
tokens = self._parser.remove_tokens(combined)
|
||||
if tokens:
|
||||
if not quiet:
|
||||
print("[nix] fallback remove tokens: " + ", ".join(tokens))
|
||||
|
||||
for t in tokens:
|
||||
self._runner.run(ctx, f"nix profile remove {t}", allow_failure=True)
|
||||
|
||||
res = self._retry.run_with_retry(ctx, self._runner, install_cmd)
|
||||
if res.returncode == 0:
|
||||
return True
|
||||
|
||||
combined = f"{res.stdout}\n{res.stderr}"
|
||||
continue
|
||||
|
||||
if not quiet:
|
||||
print(
|
||||
"[nix] conflict detected but could not resolve profile entries to remove."
|
||||
)
|
||||
return False
|
||||
|
||||
return False
|
||||
251
src/pkgmgr/actions/install/installers/nix/installer.py
Normal file
251
src/pkgmgr/actions/install/installers/nix/installer.py
Normal file
@@ -0,0 +1,251 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import os
|
||||
import shutil
|
||||
from typing import TYPE_CHECKING, List, Tuple
|
||||
|
||||
from pkgmgr.actions.install.installers.base import BaseInstaller
|
||||
|
||||
from .conflicts import NixConflictResolver
|
||||
from .profile import NixProfileInspector
|
||||
from .retry import GitHubRateLimitRetry, RetryPolicy
|
||||
from .runner import CommandRunner
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from pkgmgr.actions.install.context import RepoContext
|
||||
|
||||
|
||||
class NixFlakeInstaller(BaseInstaller):
|
||||
layer = "nix"
|
||||
FLAKE_FILE = "flake.nix"
|
||||
|
||||
def __init__(self, policy: RetryPolicy | None = None) -> None:
|
||||
self._runner = CommandRunner()
|
||||
self._retry = GitHubRateLimitRetry(policy=policy)
|
||||
self._profile = NixProfileInspector()
|
||||
self._conflicts = NixConflictResolver(self._runner, self._retry, self._profile)
|
||||
|
||||
# Newer nix rejects numeric indices; we learn this at runtime and cache the decision.
|
||||
self._indices_supported: bool | None = None
|
||||
|
||||
def supports(self, ctx: "RepoContext") -> bool:
|
||||
if os.environ.get("PKGMGR_DISABLE_NIX_FLAKE_INSTALLER") == "1":
|
||||
if not ctx.quiet:
|
||||
print(
|
||||
"[INFO] PKGMGR_DISABLE_NIX_FLAKE_INSTALLER=1 – "
|
||||
"skipping NixFlakeInstaller."
|
||||
)
|
||||
return False
|
||||
|
||||
if shutil.which("nix") is None:
|
||||
return False
|
||||
|
||||
return os.path.exists(os.path.join(ctx.repo_dir, self.FLAKE_FILE))
|
||||
|
||||
def _profile_outputs(self, ctx: "RepoContext") -> List[Tuple[str, bool]]:
|
||||
# (output_name, allow_failure)
|
||||
if ctx.identifier in {"pkgmgr", "package-manager"}:
|
||||
return [("pkgmgr", False), ("default", True)]
|
||||
return [("default", False)]
|
||||
|
||||
def run(self, ctx: "RepoContext") -> None:
|
||||
if not self.supports(ctx):
|
||||
return
|
||||
|
||||
outputs = self._profile_outputs(ctx)
|
||||
|
||||
if not ctx.quiet:
|
||||
msg = (
|
||||
"[nix] flake detected in "
|
||||
f"{ctx.identifier}, ensuring outputs: "
|
||||
+ ", ".join(name for name, _ in outputs)
|
||||
)
|
||||
print(msg)
|
||||
|
||||
for output, allow_failure in outputs:
|
||||
if ctx.force_update:
|
||||
self._force_upgrade_output(ctx, output, allow_failure)
|
||||
else:
|
||||
self._install_only(ctx, output, allow_failure)
|
||||
|
||||
def _installable(self, ctx: "RepoContext", output: str) -> str:
|
||||
return f"{ctx.repo_dir}#{output}"
|
||||
|
||||
# ---------------------------------------------------------------------
|
||||
# Core install path
|
||||
# ---------------------------------------------------------------------
|
||||
|
||||
def _install_only(
|
||||
self, ctx: "RepoContext", output: str, allow_failure: bool
|
||||
) -> None:
|
||||
install_cmd = f"nix profile install {self._installable(ctx, output)}"
|
||||
|
||||
if not ctx.quiet:
|
||||
print(f"[nix] install: {install_cmd}")
|
||||
|
||||
res = self._retry.run_with_retry(ctx, self._runner, install_cmd)
|
||||
if res.returncode == 0:
|
||||
if not ctx.quiet:
|
||||
print(f"[nix] output '{output}' successfully installed.")
|
||||
return
|
||||
|
||||
# Conflict resolver first (handles the common “existing package already provides file” case)
|
||||
if self._conflicts.resolve(
|
||||
ctx,
|
||||
install_cmd,
|
||||
res.stdout,
|
||||
res.stderr,
|
||||
output=output,
|
||||
):
|
||||
if not ctx.quiet:
|
||||
print(
|
||||
f"[nix] output '{output}' successfully installed after conflict cleanup."
|
||||
)
|
||||
return
|
||||
|
||||
if not ctx.quiet:
|
||||
print(
|
||||
f"[nix] install failed for '{output}' (exit {res.returncode}), "
|
||||
"trying upgrade/remove+install..."
|
||||
)
|
||||
|
||||
# If indices are supported, try legacy index-upgrade path.
|
||||
if self._indices_supported is not False:
|
||||
indices = self._profile.find_installed_indices_for_output(
|
||||
ctx, self._runner, output
|
||||
)
|
||||
|
||||
upgraded = False
|
||||
for idx in indices:
|
||||
if self._upgrade_index(ctx, idx):
|
||||
upgraded = True
|
||||
if not ctx.quiet:
|
||||
print(
|
||||
f"[nix] output '{output}' successfully upgraded (index {idx})."
|
||||
)
|
||||
|
||||
if upgraded:
|
||||
return
|
||||
|
||||
if indices and not ctx.quiet:
|
||||
print(
|
||||
f"[nix] upgrade failed; removing indices {indices} and reinstalling '{output}'."
|
||||
)
|
||||
|
||||
for idx in indices:
|
||||
self._remove_index(ctx, idx)
|
||||
|
||||
# If we learned indices are unsupported, immediately fall back below
|
||||
if self._indices_supported is False:
|
||||
self._remove_tokens_for_output(ctx, output)
|
||||
|
||||
else:
|
||||
# indices explicitly unsupported
|
||||
self._remove_tokens_for_output(ctx, output)
|
||||
|
||||
final = self._runner.run(ctx, install_cmd, allow_failure=True)
|
||||
if final.returncode == 0:
|
||||
if not ctx.quiet:
|
||||
print(f"[nix] output '{output}' successfully re-installed.")
|
||||
return
|
||||
|
||||
print(
|
||||
f"[ERROR] Failed to install Nix flake output '{output}' (exit {final.returncode})"
|
||||
)
|
||||
if not allow_failure:
|
||||
raise SystemExit(final.returncode)
|
||||
|
||||
print(f"[WARNING] Continuing despite failure of optional output '{output}'.")
|
||||
|
||||
# ---------------------------------------------------------------------
|
||||
# force_update path
|
||||
# ---------------------------------------------------------------------
|
||||
|
||||
def _force_upgrade_output(
|
||||
self, ctx: "RepoContext", output: str, allow_failure: bool
|
||||
) -> None:
|
||||
# Prefer token path if indices unsupported (new nix)
|
||||
if self._indices_supported is False:
|
||||
self._remove_tokens_for_output(ctx, output)
|
||||
self._install_only(ctx, output, allow_failure)
|
||||
if not ctx.quiet:
|
||||
print(f"[nix] output '{output}' successfully upgraded.")
|
||||
return
|
||||
|
||||
indices = self._profile.find_installed_indices_for_output(
|
||||
ctx, self._runner, output
|
||||
)
|
||||
|
||||
upgraded_any = False
|
||||
for idx in indices:
|
||||
if self._upgrade_index(ctx, idx):
|
||||
upgraded_any = True
|
||||
if not ctx.quiet:
|
||||
print(
|
||||
f"[nix] output '{output}' successfully upgraded (index {idx})."
|
||||
)
|
||||
|
||||
if upgraded_any:
|
||||
if not ctx.quiet:
|
||||
print(f"[nix] output '{output}' successfully upgraded.")
|
||||
return
|
||||
|
||||
if indices and not ctx.quiet:
|
||||
print(
|
||||
f"[nix] upgrade failed; removing indices {indices} and reinstalling '{output}'."
|
||||
)
|
||||
|
||||
for idx in indices:
|
||||
self._remove_index(ctx, idx)
|
||||
|
||||
# If we learned indices are unsupported, also remove by token to actually clear conflicts
|
||||
if self._indices_supported is False:
|
||||
self._remove_tokens_for_output(ctx, output)
|
||||
|
||||
self._install_only(ctx, output, allow_failure)
|
||||
|
||||
if not ctx.quiet:
|
||||
print(f"[nix] output '{output}' successfully upgraded.")
|
||||
|
||||
# ---------------------------------------------------------------------
|
||||
# Helpers
|
||||
# ---------------------------------------------------------------------
|
||||
|
||||
def _stderr_says_indices_unsupported(self, stderr: str) -> bool:
|
||||
s = (stderr or "").lower()
|
||||
return "no longer supports indices" in s or "does not support indices" in s
|
||||
|
||||
def _upgrade_index(self, ctx: "RepoContext", idx: int) -> bool:
|
||||
cmd = f"nix profile upgrade --refresh {idx}"
|
||||
res = self._runner.run(ctx, cmd, allow_failure=True)
|
||||
|
||||
if self._stderr_says_indices_unsupported(getattr(res, "stderr", "")):
|
||||
self._indices_supported = False
|
||||
return False
|
||||
|
||||
if self._indices_supported is None:
|
||||
self._indices_supported = True
|
||||
|
||||
return res.returncode == 0
|
||||
|
||||
def _remove_index(self, ctx: "RepoContext", idx: int) -> None:
|
||||
res = self._runner.run(ctx, f"nix profile remove {idx}", allow_failure=True)
|
||||
|
||||
if self._stderr_says_indices_unsupported(getattr(res, "stderr", "")):
|
||||
self._indices_supported = False
|
||||
|
||||
if self._indices_supported is None:
|
||||
self._indices_supported = True
|
||||
|
||||
def _remove_tokens_for_output(self, ctx: "RepoContext", output: str) -> None:
|
||||
tokens = self._profile.find_remove_tokens_for_output(ctx, self._runner, output)
|
||||
if not tokens:
|
||||
return
|
||||
|
||||
if not ctx.quiet:
|
||||
print(
|
||||
f"[nix] indices unsupported; removing by token(s): {', '.join(tokens)}"
|
||||
)
|
||||
|
||||
for t in tokens:
|
||||
self._runner.run(ctx, f"nix profile remove {t}", allow_failure=True)
|
||||
@@ -0,0 +1,4 @@
|
||||
from .inspector import NixProfileInspector
|
||||
from .models import NixProfileEntry
|
||||
|
||||
__all__ = ["NixProfileInspector", "NixProfileEntry"]
|
||||
164
src/pkgmgr/actions/install/installers/nix/profile/inspector.py
Normal file
164
src/pkgmgr/actions/install/installers/nix/profile/inspector.py
Normal file
@@ -0,0 +1,164 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import Any, List, TYPE_CHECKING
|
||||
|
||||
from .matcher import (
|
||||
entry_matches_output,
|
||||
entry_matches_store_path,
|
||||
stable_unique_ints,
|
||||
)
|
||||
from .normalizer import normalize_elements
|
||||
from .parser import parse_profile_list_json
|
||||
from .result import extract_stdout_text
|
||||
|
||||
if TYPE_CHECKING:
|
||||
# Keep these as TYPE_CHECKING-only to avoid runtime import cycles.
|
||||
from pkgmgr.actions.install.context import RepoContext
|
||||
from pkgmgr.core.command.runner import CommandRunner
|
||||
|
||||
|
||||
class NixProfileInspector:
|
||||
"""
|
||||
Reads and inspects the user's Nix profile list (JSON).
|
||||
|
||||
Public API:
|
||||
- list_json()
|
||||
- find_installed_indices_for_output() (legacy; may not work on newer nix)
|
||||
- find_indices_by_store_path() (legacy; may not work on newer nix)
|
||||
- find_remove_tokens_for_output()
|
||||
- find_remove_tokens_for_store_prefixes()
|
||||
"""
|
||||
|
||||
def list_json(self, ctx: "RepoContext", runner: "CommandRunner") -> dict[str, Any]:
|
||||
res = runner.run(ctx, "nix profile list --json", allow_failure=False)
|
||||
raw = extract_stdout_text(res)
|
||||
return parse_profile_list_json(raw)
|
||||
|
||||
# ---------------------------------------------------------------------
|
||||
# Legacy index helpers (still useful on older nix; newer nix may reject indices)
|
||||
# ---------------------------------------------------------------------
|
||||
|
||||
def find_installed_indices_for_output(
|
||||
self,
|
||||
ctx: "RepoContext",
|
||||
runner: "CommandRunner",
|
||||
output: str,
|
||||
) -> List[int]:
|
||||
data = self.list_json(ctx, runner)
|
||||
entries = normalize_elements(data)
|
||||
|
||||
hits: List[int] = []
|
||||
for e in entries:
|
||||
if e.index is None:
|
||||
continue
|
||||
if entry_matches_output(e, output):
|
||||
hits.append(e.index)
|
||||
|
||||
return stable_unique_ints(hits)
|
||||
|
||||
def find_indices_by_store_path(
|
||||
self,
|
||||
ctx: "RepoContext",
|
||||
runner: "CommandRunner",
|
||||
store_path: str,
|
||||
) -> List[int]:
|
||||
needle = (store_path or "").strip()
|
||||
if not needle:
|
||||
return []
|
||||
|
||||
data = self.list_json(ctx, runner)
|
||||
entries = normalize_elements(data)
|
||||
|
||||
hits: List[int] = []
|
||||
for e in entries:
|
||||
if e.index is None:
|
||||
continue
|
||||
if entry_matches_store_path(e, needle):
|
||||
hits.append(e.index)
|
||||
|
||||
return stable_unique_ints(hits)
|
||||
|
||||
# ---------------------------------------------------------------------
|
||||
# New token-based helpers (works with newer nix where indices are rejected)
|
||||
# ---------------------------------------------------------------------
|
||||
|
||||
def find_remove_tokens_for_output(
|
||||
self,
|
||||
ctx: "RepoContext",
|
||||
runner: "CommandRunner",
|
||||
output: str,
|
||||
) -> List[str]:
|
||||
"""
|
||||
Returns profile remove tokens to remove entries matching a given output.
|
||||
|
||||
We always include the raw output token first because nix itself suggests:
|
||||
nix profile remove pkgmgr
|
||||
"""
|
||||
out = (output or "").strip()
|
||||
if not out:
|
||||
return []
|
||||
|
||||
data = self.list_json(ctx, runner)
|
||||
entries = normalize_elements(data)
|
||||
|
||||
tokens: List[str] = [
|
||||
out
|
||||
] # critical: matches nix's own suggestion for conflicts
|
||||
|
||||
for e in entries:
|
||||
if entry_matches_output(e, out):
|
||||
# Prefer removing by key/name (non-index) when possible.
|
||||
# New nix rejects numeric indices; these tokens are safer.
|
||||
k = (e.key or "").strip()
|
||||
n = (e.name or "").strip()
|
||||
|
||||
if k and not k.isdigit():
|
||||
tokens.append(k)
|
||||
elif n and not n.isdigit():
|
||||
tokens.append(n)
|
||||
|
||||
# stable unique preserving order
|
||||
seen: set[str] = set()
|
||||
uniq: List[str] = []
|
||||
for t in tokens:
|
||||
if t and t not in seen:
|
||||
uniq.append(t)
|
||||
seen.add(t)
|
||||
return uniq
|
||||
|
||||
def find_remove_tokens_for_store_prefixes(
|
||||
self,
|
||||
ctx: "RepoContext",
|
||||
runner: "CommandRunner",
|
||||
prefixes: List[str],
|
||||
) -> List[str]:
|
||||
"""
|
||||
Returns remove tokens for entries whose store path matches any prefix.
|
||||
"""
|
||||
prefixes = [(p or "").strip() for p in (prefixes or []) if p]
|
||||
prefixes = [p for p in prefixes if p]
|
||||
if not prefixes:
|
||||
return []
|
||||
|
||||
data = self.list_json(ctx, runner)
|
||||
entries = normalize_elements(data)
|
||||
|
||||
tokens: List[str] = []
|
||||
for e in entries:
|
||||
if not e.store_paths:
|
||||
continue
|
||||
if any(sp == p for sp in e.store_paths for p in prefixes):
|
||||
k = (e.key or "").strip()
|
||||
n = (e.name or "").strip()
|
||||
if k and not k.isdigit():
|
||||
tokens.append(k)
|
||||
elif n and not n.isdigit():
|
||||
tokens.append(n)
|
||||
|
||||
seen: set[str] = set()
|
||||
uniq: List[str] = []
|
||||
for t in tokens:
|
||||
if t and t not in seen:
|
||||
uniq.append(t)
|
||||
seen.add(t)
|
||||
return uniq
|
||||
62
src/pkgmgr/actions/install/installers/nix/profile/matcher.py
Normal file
62
src/pkgmgr/actions/install/installers/nix/profile/matcher.py
Normal file
@@ -0,0 +1,62 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import List
|
||||
|
||||
from .models import NixProfileEntry
|
||||
|
||||
|
||||
def entry_matches_output(entry: NixProfileEntry, output: str) -> bool:
|
||||
"""
|
||||
Heuristic matcher: output is typically a flake output name (e.g. "pkgmgr"),
|
||||
and we match against name/attrPath patterns.
|
||||
"""
|
||||
out = (output or "").strip()
|
||||
if not out:
|
||||
return False
|
||||
|
||||
candidates = [entry.name, entry.attr_path]
|
||||
|
||||
for c in candidates:
|
||||
c = (c or "").strip()
|
||||
if not c:
|
||||
continue
|
||||
|
||||
# Direct match
|
||||
if c == out:
|
||||
return True
|
||||
|
||||
# AttrPath contains "#<output>"
|
||||
if f"#{out}" in c:
|
||||
return True
|
||||
|
||||
# AttrPath ends with ".<output>"
|
||||
if c.endswith(f".{out}"):
|
||||
return True
|
||||
|
||||
# Name pattern "<output>-<n>" (common, e.g. pkgmgr-1)
|
||||
if c.startswith(f"{out}-"):
|
||||
return True
|
||||
|
||||
# Historical special case: repo is "package-manager" but output is "pkgmgr"
|
||||
if out == "pkgmgr" and c.startswith("package-manager-"):
|
||||
return True
|
||||
|
||||
return False
|
||||
|
||||
|
||||
def entry_matches_store_path(entry: NixProfileEntry, store_path: str) -> bool:
|
||||
needle = (store_path or "").strip()
|
||||
if not needle:
|
||||
return False
|
||||
return any((p or "") == needle for p in entry.store_paths)
|
||||
|
||||
|
||||
def stable_unique_ints(values: List[int]) -> List[int]:
|
||||
seen: set[int] = set()
|
||||
uniq: List[int] = []
|
||||
for v in values:
|
||||
if v in seen:
|
||||
continue
|
||||
uniq.append(v)
|
||||
seen.add(v)
|
||||
return uniq
|
||||
17
src/pkgmgr/actions/install/installers/nix/profile/models.py
Normal file
17
src/pkgmgr/actions/install/installers/nix/profile/models.py
Normal file
@@ -0,0 +1,17 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from dataclasses import dataclass
|
||||
from typing import List, Optional
|
||||
|
||||
|
||||
@dataclass(frozen=True)
|
||||
class NixProfileEntry:
|
||||
"""
|
||||
Minimal normalized representation of one nix profile element entry.
|
||||
"""
|
||||
|
||||
key: str
|
||||
index: Optional[int]
|
||||
name: str
|
||||
attr_path: str
|
||||
store_paths: List[str]
|
||||
128
src/pkgmgr/actions/install/installers/nix/profile/normalizer.py
Normal file
128
src/pkgmgr/actions/install/installers/nix/profile/normalizer.py
Normal file
@@ -0,0 +1,128 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import re
|
||||
from typing import Any, Dict, Iterable, List, Optional
|
||||
|
||||
from .models import NixProfileEntry
|
||||
|
||||
|
||||
def coerce_index(key: str, entry: Dict[str, Any]) -> Optional[int]:
|
||||
"""
|
||||
Nix JSON schema varies:
|
||||
- elements keys might be "0", "1", ...
|
||||
- or might be names like "pkgmgr-1"
|
||||
Some versions include an explicit index field.
|
||||
We try safe options in order.
|
||||
"""
|
||||
k = (key or "").strip()
|
||||
|
||||
# 1) Classic: numeric keys
|
||||
if k.isdigit():
|
||||
try:
|
||||
return int(k)
|
||||
except Exception:
|
||||
return None
|
||||
|
||||
# 2) Explicit index fields (schema-dependent)
|
||||
for field in ("index", "id", "position"):
|
||||
v = entry.get(field)
|
||||
if isinstance(v, int):
|
||||
return v
|
||||
if isinstance(v, str) and v.strip().isdigit():
|
||||
try:
|
||||
return int(v.strip())
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
# 3) Last resort: extract trailing number from key if it looks like "<name>-<n>"
|
||||
m = re.match(r"^.+-(\d+)$", k)
|
||||
if m:
|
||||
try:
|
||||
return int(m.group(1))
|
||||
except Exception:
|
||||
return None
|
||||
|
||||
return None
|
||||
|
||||
|
||||
def iter_store_paths(entry: Dict[str, Any]) -> Iterable[str]:
|
||||
"""
|
||||
Yield all possible store paths from a nix profile JSON entry.
|
||||
|
||||
Nix has had schema shifts. We support common variants:
|
||||
- "storePaths": ["/nix/store/..", ...]
|
||||
- "storePaths": "/nix/store/.." (rare)
|
||||
- "storePath": "/nix/store/.." (some variants)
|
||||
- nested "outputs" dict(s) with store paths (best-effort)
|
||||
"""
|
||||
if not isinstance(entry, dict):
|
||||
return
|
||||
|
||||
sp = entry.get("storePaths")
|
||||
if isinstance(sp, list):
|
||||
for p in sp:
|
||||
if isinstance(p, str):
|
||||
yield p
|
||||
elif isinstance(sp, str):
|
||||
yield sp
|
||||
|
||||
sp2 = entry.get("storePath")
|
||||
if isinstance(sp2, str):
|
||||
yield sp2
|
||||
|
||||
outs = entry.get("outputs")
|
||||
if isinstance(outs, dict):
|
||||
for _, ov in outs.items():
|
||||
if isinstance(ov, dict):
|
||||
p = ov.get("storePath")
|
||||
if isinstance(p, str):
|
||||
yield p
|
||||
|
||||
|
||||
def normalize_store_path(store_path: str) -> str:
    """
    Return *store_path* in canonical form for comparisons.

    Today this only trims surrounding whitespace (mapping None/"" to "");
    the function exists as a hook for richer normalization later.
    """
    if not store_path:
        return ""
    return store_path.strip()
|
||||
|
||||
|
||||
def normalize_elements(data: Dict[str, Any]) -> List[NixProfileEntry]:
    """
    Convert `nix profile list --json` data into normalized entries.

    Known shapes of the "elements" mapping:
      - {"elements": {"0": {...}, "1": {...}}}
      - {"elements": {"pkgmgr-1": {...}, "pkgmgr-2": {...}}}

    Entries that are not dicts are skipped; a missing or malformed
    "elements" mapping yields an empty list.
    """
    elements = data.get("elements")
    if not isinstance(elements, dict):
        return []

    result: List[NixProfileEntry] = []

    for raw_key, raw_entry in elements.items():
        if not isinstance(raw_entry, dict):
            continue

        key = str(raw_key)

        # Collect non-empty, normalized store paths for this entry.
        paths = [
            normalized
            for normalized in (
                normalize_store_path(p) for p in iter_store_paths(raw_entry)
            )
            if normalized
        ]

        result.append(
            NixProfileEntry(
                key=key,
                index=coerce_index(key, raw_entry),
                name=str(raw_entry.get("name", "") or ""),
                attr_path=str(raw_entry.get("attrPath", "") or ""),
                store_paths=paths,
            )
        )

    return result
|
||||
19
src/pkgmgr/actions/install/installers/nix/profile/parser.py
Normal file
19
src/pkgmgr/actions/install/installers/nix/profile/parser.py
Normal file
@@ -0,0 +1,19 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
from typing import Any, Dict
|
||||
|
||||
|
||||
def parse_profile_list_json(raw: str) -> Dict[str, Any]:
    """
    Parse the JSON emitted by `nix profile list --json`.

    On malformed JSON this exits the process via SystemExit, embedding the
    decode error plus up to the first 5000 characters of the raw output so
    the user can see what nix actually printed.
    """
    try:
        return json.loads(raw)
    except json.JSONDecodeError as e:
        head = raw[:5000] if raw else ""
        message = f"[nix] Failed to parse `nix profile list --json`: {e}\n{head}"
        raise SystemExit(message) from e
|
||||
28
src/pkgmgr/actions/install/installers/nix/profile/result.py
Normal file
28
src/pkgmgr/actions/install/installers/nix/profile/result.py
Normal file
@@ -0,0 +1,28 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import Any
|
||||
|
||||
|
||||
def extract_stdout_text(result: Any) -> str:
    """
    Coerce an arbitrary runner return value into a stdout string.

    Resolution order:
      1. plain str            -> returned unchanged
      2. bytes/bytearray      -> decoded as UTF-8 with replacement
      3. object with .stdout  -> that value (str, or bytes decoded as above)
      4. anything else        -> str(result)
    """

    def _decode(blob: Any) -> str:
        # Lossy UTF-8 decode: never raises on bad bytes.
        return bytes(blob).decode("utf-8", errors="replace")

    if isinstance(result, str):
        return result
    if isinstance(result, (bytes, bytearray)):
        return _decode(result)

    captured = getattr(result, "stdout", None)
    if isinstance(captured, str):
        return captured
    if isinstance(captured, (bytes, bytearray)):
        return _decode(captured)

    return str(result)
|
||||
71
src/pkgmgr/actions/install/installers/nix/profile_list.py
Normal file
71
src/pkgmgr/actions/install/installers/nix/profile_list.py
Normal file
@@ -0,0 +1,71 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import re
|
||||
from typing import TYPE_CHECKING, List, Tuple
|
||||
|
||||
from .runner import CommandRunner
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from pkgmgr.actions.install.context import RepoContext
|
||||
|
||||
|
||||
class NixProfileListReader:
|
||||
def __init__(self, runner: CommandRunner) -> None:
|
||||
self._runner = runner
|
||||
|
||||
@staticmethod
|
||||
def _store_prefix(path: str) -> str:
|
||||
raw = (path or "").strip()
|
||||
m = re.match(r"^(/nix/store/[0-9a-z]{32}-[^/ \t]+)", raw)
|
||||
return m.group(1) if m else raw
|
||||
|
||||
def entries(self, ctx: "RepoContext") -> List[Tuple[int, str]]:
|
||||
res = self._runner.run(ctx, "nix profile list", allow_failure=True)
|
||||
if res.returncode != 0:
|
||||
return []
|
||||
|
||||
entries: List[Tuple[int, str]] = []
|
||||
pat = re.compile(
|
||||
r"^\s*(\d+)\s+.*?(/nix/store/[0-9a-z]{32}-[^/ \t]+)",
|
||||
re.MULTILINE,
|
||||
)
|
||||
|
||||
for m in pat.finditer(res.stdout or ""):
|
||||
idx_s = m.group(1)
|
||||
sp = m.group(2)
|
||||
try:
|
||||
idx = int(idx_s)
|
||||
except Exception:
|
||||
continue
|
||||
entries.append((idx, self._store_prefix(sp)))
|
||||
|
||||
seen: set[int] = set()
|
||||
uniq: List[Tuple[int, str]] = []
|
||||
for idx, sp in entries:
|
||||
if idx not in seen:
|
||||
seen.add(idx)
|
||||
uniq.append((idx, sp))
|
||||
|
||||
return uniq
|
||||
|
||||
def indices_matching_store_prefixes(
|
||||
self, ctx: "RepoContext", prefixes: List[str]
|
||||
) -> List[int]:
|
||||
prefixes = [self._store_prefix(p) for p in prefixes if p]
|
||||
prefixes = [p for p in prefixes if p]
|
||||
if not prefixes:
|
||||
return []
|
||||
|
||||
hits: List[int] = []
|
||||
for idx, sp in self.entries(ctx):
|
||||
if any(sp == p for p in prefixes):
|
||||
hits.append(idx)
|
||||
|
||||
seen: set[int] = set()
|
||||
uniq: List[int] = []
|
||||
for i in hits:
|
||||
if i not in seen:
|
||||
seen.add(i)
|
||||
uniq.append(i)
|
||||
|
||||
return uniq
|
||||
100
src/pkgmgr/actions/install/installers/nix/retry.py
Normal file
100
src/pkgmgr/actions/install/installers/nix/retry.py
Normal file
@@ -0,0 +1,100 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import random
|
||||
import time
|
||||
from dataclasses import dataclass
|
||||
from typing import Iterable, TYPE_CHECKING
|
||||
|
||||
from .types import RunResult
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from pkgmgr.actions.install.context import RepoContext
|
||||
from .runner import CommandRunner
|
||||
|
||||
|
||||
@dataclass(frozen=True)
class RetryPolicy:
    """
    Tunables for GitHub rate-limit retries.

    The wait before retry N is the N-th value of a Fibonacci sequence
    seeded with ``base_delay_seconds`` plus a uniform random jitter drawn
    from [jitter_seconds_min, jitter_seconds_max].
    """

    # Total number of install attempts before giving up.
    max_attempts: int = 7
    # Seed value (seconds) for the Fibonacci backoff sequence.
    base_delay_seconds: int = 30
    # Inclusive lower bound (seconds) for the random jitter.
    jitter_seconds_min: int = 0
    # Inclusive upper bound (seconds) for the random jitter.
    jitter_seconds_max: int = 60
|
||||
|
||||
|
||||
class GitHubRateLimitRetry:
    """
    Retry wrapper for nix install commands.

    A failing command is re-run only when its combined output looks like a
    GitHub API rate limit (HTTP 403). Waits between attempts follow a
    Fibonacci progression seeded with the policy's base delay, plus a
    random per-attempt jitter.
    """

    def __init__(self, policy: RetryPolicy | None = None) -> None:
        self._policy = policy or RetryPolicy()

    def run_with_retry(
        self,
        ctx: "RepoContext",
        runner: "CommandRunner",
        install_cmd: str,
    ) -> RunResult:
        """Run *install_cmd*, retrying only on GitHub 403 rate-limit errors."""
        policy = self._policy
        quiet = bool(getattr(ctx, "quiet", False))
        backoffs = list(
            self._fibonacci_backoff(policy.base_delay_seconds, policy.max_attempts)
        )

        outcome: RunResult | None = None

        for attempt, delay in enumerate(backoffs, start=1):
            if not quiet:
                print(f"[nix] attempt {attempt}/{policy.max_attempts}: {install_cmd}")

            outcome = runner.run(ctx, install_cmd, allow_failure=True)
            if outcome.returncode == 0:
                return outcome

            # Only a rate-limit signature justifies another attempt.
            if not self._is_github_rate_limit_error(
                f"{outcome.stdout}\n{outcome.stderr}"
            ):
                return outcome

            if attempt >= policy.max_attempts:
                break

            jitter = random.randint(
                policy.jitter_seconds_min, policy.jitter_seconds_max
            )
            pause = delay + jitter

            if not quiet:
                print(
                    "[nix] GitHub rate limit detected (403). "
                    f"Retrying in {pause}s (base={delay}s, jitter={jitter}s)..."
                )

            time.sleep(pause)

        if outcome is not None:
            return outcome
        return RunResult(returncode=1, stdout="", stderr="nix install retry failed")

    @staticmethod
    def _is_github_rate_limit_error(text: str) -> bool:
        """True when *text* contains a known GitHub 403 / rate-limit marker."""
        haystack = (text or "").lower()
        needles = (
            "http error 403",
            "rate limit exceeded",
            "github api rate limit",
            "api rate limit exceeded",
        )
        return any(needle in haystack for needle in needles)

    @staticmethod
    def _fibonacci_backoff(base: int, attempts: int) -> Iterable[int]:
        """Yield max(1, attempts) delays: base, base, 2*base, 3*base, 5*base, ..."""
        current, following = base, base
        for _ in range(max(1, attempts)):
            yield current
            current, following = following, current + following
|
||||
67
src/pkgmgr/actions/install/installers/nix/runner.py
Normal file
67
src/pkgmgr/actions/install/installers/nix/runner.py
Normal file
@@ -0,0 +1,67 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import subprocess
|
||||
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
from .types import RunResult
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from pkgmgr.actions.install.context import RepoContext
|
||||
|
||||
|
||||
class CommandRunner:
    """
    Run shell commands inside a repository directory (when one is given).

    Honors preview mode (print the command instead of executing it) and,
    on failure, prints a compact tail of stdout/stderr unless quiet is set.
    """

    def run(self, ctx: "RepoContext", cmd: str, allow_failure: bool) -> RunResult:
        """
        Execute *cmd* with shell=True and capture its output.

        Raises SystemExit(returncode) on a non-zero exit unless
        *allow_failure* is True; spawn errors become a returncode-1 result
        (or re-raise when failures are not allowed).
        """
        workdir = getattr(ctx, "repo_dir", None) or getattr(ctx, "repo_path", None)
        quiet = bool(getattr(ctx, "quiet", False))

        if bool(getattr(ctx, "preview", False)):
            if not quiet:
                print(f"[preview] {cmd}")
            return RunResult(returncode=0, stdout="", stderr="")

        try:
            proc = subprocess.run(
                cmd,
                shell=True,
                cwd=workdir,
                check=False,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                text=True,
            )
        except Exception as exc:
            if not allow_failure:
                raise
            return RunResult(returncode=1, stdout="", stderr=str(exc))

        outcome = RunResult(
            returncode=proc.returncode,
            stdout=proc.stdout or "",
            stderr=proc.stderr or "",
        )

        if outcome.returncode != 0:
            if not quiet:
                self._print_compact_failure(outcome)
            if not allow_failure:
                raise SystemExit(outcome.returncode)

        return outcome

    @staticmethod
    def _print_compact_failure(res: RunResult) -> None:
        """Print the last 20 stdout lines and last 40 stderr lines, if any."""
        tails = (
            ("[nix] stdout (last lines):", (res.stdout or "").strip(), 20),
            ("[nix] stderr (last lines):", (res.stderr or "").strip(), 40),
        )
        for header, text, keep in tails:
            if text:
                print(header)
                print("\n".join(text.splitlines()[-keep:]))
|
||||
78
src/pkgmgr/actions/install/installers/nix/textparse.py
Normal file
78
src/pkgmgr/actions/install/installers/nix/textparse.py
Normal file
@@ -0,0 +1,78 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import re
|
||||
from typing import List
|
||||
|
||||
|
||||
class NixConflictTextParser:
    """
    Extract remediation hints from `nix profile install` conflict output.

    Understands two things nix prints on file conflicts:
      - suggested ``nix profile remove <token>`` commands
      - the store path(s) of the already-installed ("existing") package
    """

    # Captures the /nix/store/<32-char-hash>-<name> prefix of a path.
    _PREFIX_RE = re.compile(r"^(/nix/store/[0-9a-z]{32}-[^/ \t]+)")
    # A whole line of the form "nix profile remove <token>".
    _REMOVE_RE = re.compile(
        r"^\s*nix profile remove\s+([^\s'\"`]+|'[^']+'|\"[^\"]+\")\s*$",
        re.MULTILINE,
    )
    # A line that begins (after indentation) with a store path.
    _STORE_LINE_RE = re.compile(r"^\s*(/nix/store/[0-9a-z]{32}-[^ \t]+)")

    @staticmethod
    def _store_prefix(path: str) -> str:
        """Reduce a store path to its /nix/store/<hash>-<name> prefix."""
        text = (path or "").strip()
        found = NixConflictTextParser._PREFIX_RE.match(text)
        return found.group(1) if found else text

    def remove_tokens(self, text: str) -> List[str]:
        """Tokens from suggested `nix profile remove ...` lines, de-duplicated."""
        known: set[str] = set()
        ordered: List[str] = []

        for found in self._REMOVE_RE.finditer(text or ""):
            token = (found.group(1) or "").strip()
            # Strip one layer of matching single or double quotes.
            if len(token) >= 2 and token[0] == token[-1] and token[0] in ("'", '"'):
                token = token[1:-1]
            if token and token not in known:
                known.add(token)
                ordered.append(token)

        return ordered

    def existing_store_prefixes(self, text: str) -> List[str]:
        """Store prefixes listed under the 'existing package' conflict section."""
        known: set[str] = set()
        ordered: List[str] = []
        collecting = False

        for raw in (text or "").splitlines():
            stripped = raw.strip()

            if "An existing package already provides the following file" in stripped:
                collecting = True
                continue
            if "This is the conflicting file from the new package" in stripped:
                collecting = False
                continue

            if collecting:
                found = self._STORE_LINE_RE.match(raw)
                if found:
                    prefix = self._store_prefix(found.group(1))
                    if prefix and prefix not in known:
                        known.add(prefix)
                        ordered.append(prefix)

        return ordered
|
||||
10
src/pkgmgr/actions/install/installers/nix/types.py
Normal file
10
src/pkgmgr/actions/install/installers/nix/types.py
Normal file
@@ -0,0 +1,10 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from dataclasses import dataclass
|
||||
|
||||
|
||||
@dataclass(frozen=True)
class RunResult:
    """Immutable outcome of one executed command."""

    # Process exit status (0 = success).
    returncode: int
    # Captured standard output text (empty string when nothing was captured).
    stdout: str
    # Captured standard error text (empty string when nothing was captured).
    stderr: str
|
||||
@@ -1,165 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
"""
|
||||
Installer for Nix flakes.
|
||||
|
||||
If a repository contains flake.nix and the 'nix' command is available, this
|
||||
installer will try to install profile outputs from the flake.
|
||||
|
||||
Behavior:
|
||||
- If flake.nix is present and `nix` exists on PATH:
|
||||
* First remove any existing `package-manager` profile entry (best-effort).
|
||||
* Then install one or more flake outputs via `nix profile install`.
|
||||
- For the package-manager repo:
|
||||
* `pkgmgr` is mandatory (CLI), `default` is optional.
|
||||
- For all other repos:
|
||||
* `default` is mandatory.
|
||||
|
||||
Special handling:
|
||||
- If PKGMGR_DISABLE_NIX_FLAKE_INSTALLER=1 is set, the installer is
|
||||
globally disabled (useful for CI or debugging).
|
||||
|
||||
The higher-level InstallationPipeline and CLI-layer model decide when this
|
||||
installer is allowed to run, based on where the current CLI comes from
|
||||
(e.g. Nix, OS packages, Python, Makefile).
|
||||
"""
|
||||
|
||||
import os
|
||||
import shutil
|
||||
from typing import TYPE_CHECKING, List, Tuple
|
||||
|
||||
from pkgmgr.actions.install.installers.base import BaseInstaller
|
||||
from pkgmgr.core.command.run import run_command
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from pkgmgr.actions.install.context import RepoContext
|
||||
from pkgmgr.actions.install import InstallContext
|
||||
|
||||
|
||||
class NixFlakeInstaller(BaseInstaller):
    """Install Nix flake profiles for repositories that define flake.nix."""

    # Logical layer name, used by capability matchers.
    layer = "nix"

    # File whose presence marks a repo as a flake.
    FLAKE_FILE = "flake.nix"
    # Shared profile entry name removed before reinstalling.
    PROFILE_NAME = "package-manager"

    def supports(self, ctx: "RepoContext") -> bool:
        """
        Only support repositories that:
        - Are NOT explicitly disabled via PKGMGR_DISABLE_NIX_FLAKE_INSTALLER=1,
        - Have a flake.nix,
        - And have the `nix` command available.
        """
        # Optional global kill-switch for CI or debugging.
        if os.environ.get("PKGMGR_DISABLE_NIX_FLAKE_INSTALLER") == "1":
            print(
                "[INFO] PKGMGR_DISABLE_NIX_FLAKE_INSTALLER=1 – "
                "NixFlakeInstaller is disabled."
            )
            return False

        # Nix must be available.
        if shutil.which("nix") is None:
            return False

        # flake.nix must exist in the repository.
        flake_path = os.path.join(ctx.repo_dir, self.FLAKE_FILE)
        return os.path.exists(flake_path)

    def _ensure_old_profile_removed(self, ctx: "RepoContext") -> None:
        """
        Best-effort removal of an existing profile entry.

        This handles the "already provides the following file" conflict by
        removing previous `package-manager` installations before we install
        the new one.

        Any error in `nix profile remove` is intentionally ignored, because
        a missing profile entry is not a fatal condition.
        """
        if shutil.which("nix") is None:
            return

        # `|| true` keeps the shell exit status zero even when nothing
        # was removed.
        cmd = f"nix profile remove {self.PROFILE_NAME} || true"
        try:
            # NOTE: no allow_failure here → matches the existing unit tests
            run_command(cmd, cwd=ctx.repo_dir, preview=ctx.preview)
        except SystemExit:
            # Unit tests explicitly assert this is swallowed
            pass

    def _profile_outputs(self, ctx: "RepoContext") -> List[Tuple[str, bool]]:
        """
        Decide which flake outputs to install and whether failures are fatal.

        Returns a list of (output_name, allow_failure) tuples.

        Rules:
        - For the package-manager repo (identifier 'pkgmgr' or 'package-manager'):
            [("pkgmgr", False), ("default", True)]
        - For all other repos:
            [("default", False)]
        """
        ident = ctx.identifier

        if ident in {"pkgmgr", "package-manager"}:
            # pkgmgr: main CLI output is "pkgmgr" (mandatory),
            # "default" is nice-to-have (non-fatal).
            return [("pkgmgr", False), ("default", True)]

        # Generic repos: we expect a sensible "default" package/app.
        # Failure to install it is considered fatal.
        return [("default", False)]

    def run(self, ctx: "InstallContext") -> None:
        """
        Install Nix flake profile outputs.

        For the package-manager repo, failure installing 'pkgmgr' is fatal,
        failure installing 'default' is non-fatal.
        For other repos, failure installing 'default' is fatal.
        """
        # Reuse supports() to keep logic in one place.
        if not self.supports(ctx):  # type: ignore[arg-type]
            return

        outputs = self._profile_outputs(ctx)  # list of (name, allow_failure)

        print(
            "Nix flake detected in "
            f"{ctx.identifier}, attempting to install profile outputs: "
            + ", ".join(name for name, _ in outputs)
        )

        # Handle the "already installed" case up-front for the shared profile.
        self._ensure_old_profile_removed(ctx)  # type: ignore[arg-type]

        for output, allow_failure in outputs:
            # NOTE(review): os.system interpolates repo_dir into a shell
            # string unquoted — presumably repo_dir is trusted local config;
            # confirm before reusing this pattern with untrusted paths.
            cmd = f"nix profile install {ctx.repo_dir}#{output}"
            print(f"[INFO] Running: {cmd}")
            ret = os.system(cmd)

            # Extract real exit code from os.system() result
            if os.WIFEXITED(ret):
                exit_code = os.WEXITSTATUS(ret)
            else:
                # abnormal termination (signal etc.) – keep raw value
                exit_code = ret

            if exit_code == 0:
                print(f"Nix flake output '{output}' successfully installed.")
                continue

            print(f"[Error] Failed to install Nix flake output '{output}'")
            print(f"[Error] Command exited with code {exit_code}")

            if not allow_failure:
                raise SystemExit(exit_code)

            print(
                "[Warning] Continuing despite failure to install "
                f"optional output '{output}'."
            )
|
||||
@@ -1,104 +1,42 @@
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
"""
|
||||
PythonInstaller — install Python projects defined via pyproject.toml.
|
||||
|
||||
Installation rules:
|
||||
|
||||
1. pip command resolution:
|
||||
a) If PKGMGR_PIP is set → use it exactly as provided.
|
||||
b) Else if running inside a virtualenv → use `sys.executable -m pip`.
|
||||
c) Else → create/use a per-repository virtualenv under ~/.venvs/<repo>/.
|
||||
|
||||
2. Installation target:
|
||||
- Always install into the resolved pip environment.
|
||||
- Never modify system Python, never rely on --user.
|
||||
- Nix-immutable systems (PEP 668) are automatically avoided because we
|
||||
never touch system Python.
|
||||
|
||||
3. The installer is skipped when:
|
||||
- PKGMGR_DISABLE_PYTHON_INSTALLER=1 is set.
|
||||
- The repository has no pyproject.toml.
|
||||
|
||||
All pip failures are treated as fatal.
|
||||
"""
|
||||
|
||||
# src/pkgmgr/actions/install/installers/python.py
|
||||
from __future__ import annotations
|
||||
|
||||
import os
|
||||
import sys
|
||||
import subprocess
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
from pkgmgr.actions.install.installers.base import BaseInstaller
|
||||
from pkgmgr.actions.install.context import RepoContext
|
||||
from pkgmgr.core.command.run import run_command
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from pkgmgr.actions.install.context import RepoContext
|
||||
from pkgmgr.actions.install import InstallContext
|
||||
|
||||
|
||||
class PythonInstaller(BaseInstaller):
|
||||
"""Install Python projects and dependencies via pip using isolated environments."""
|
||||
|
||||
layer = "python"
|
||||
|
||||
# ----------------------------------------------------------------------
|
||||
# Installer activation logic
|
||||
# ----------------------------------------------------------------------
|
||||
def supports(self, ctx: "RepoContext") -> bool:
|
||||
"""
|
||||
Return True if this installer should handle this repository.
|
||||
|
||||
The installer is active only when:
|
||||
- A pyproject.toml exists in the repo, and
|
||||
- PKGMGR_DISABLE_PYTHON_INSTALLER is not set.
|
||||
"""
|
||||
def supports(self, ctx: RepoContext) -> bool:
|
||||
if os.environ.get("PKGMGR_DISABLE_PYTHON_INSTALLER") == "1":
|
||||
print("[INFO] PythonInstaller disabled via PKGMGR_DISABLE_PYTHON_INSTALLER.")
|
||||
print(
|
||||
"[INFO] PythonInstaller disabled via PKGMGR_DISABLE_PYTHON_INSTALLER."
|
||||
)
|
||||
return False
|
||||
|
||||
return os.path.exists(os.path.join(ctx.repo_dir, "pyproject.toml"))
|
||||
|
||||
# ----------------------------------------------------------------------
|
||||
# Virtualenv handling
|
||||
# ----------------------------------------------------------------------
|
||||
def _in_virtualenv(self) -> bool:
|
||||
"""Detect whether the current interpreter is inside a venv."""
|
||||
if os.environ.get("VIRTUAL_ENV"):
|
||||
return True
|
||||
|
||||
base = getattr(sys, "base_prefix", sys.prefix)
|
||||
return sys.prefix != base
|
||||
|
||||
def _ensure_repo_venv(self, ctx: "InstallContext") -> str:
|
||||
"""
|
||||
Ensure that ~/.venvs/<identifier>/ exists and contains a minimal venv.
|
||||
|
||||
Returns the venv directory path.
|
||||
"""
|
||||
def _ensure_repo_venv(self, ctx: RepoContext) -> str:
|
||||
venv_dir = os.path.expanduser(f"~/.venvs/{ctx.identifier}")
|
||||
python = sys.executable
|
||||
|
||||
if not os.path.isdir(venv_dir):
|
||||
print(f"[python-installer] Creating virtualenv: {venv_dir}")
|
||||
subprocess.check_call([python, "-m", "venv", venv_dir])
|
||||
if not os.path.exists(venv_dir):
|
||||
run_command(f"{python} -m venv {venv_dir}", preview=ctx.preview)
|
||||
|
||||
return venv_dir
|
||||
|
||||
# ----------------------------------------------------------------------
|
||||
# pip command resolution
|
||||
# ----------------------------------------------------------------------
|
||||
def _pip_cmd(self, ctx: "InstallContext") -> str:
|
||||
"""
|
||||
Determine which pip command to use.
|
||||
|
||||
Priority:
|
||||
1. PKGMGR_PIP override given by user or automation.
|
||||
2. Active virtualenv → use sys.executable -m pip.
|
||||
3. Per-repository venv → ~/.venvs/<repo>/bin/pip
|
||||
"""
|
||||
def _pip_cmd(self, ctx: RepoContext) -> str:
|
||||
explicit = os.environ.get("PKGMGR_PIP", "").strip()
|
||||
if explicit:
|
||||
return explicit
|
||||
@@ -107,33 +45,19 @@ class PythonInstaller(BaseInstaller):
|
||||
return f"{sys.executable} -m pip"
|
||||
|
||||
venv_dir = self._ensure_repo_venv(ctx)
|
||||
pip_path = os.path.join(venv_dir, "bin", "pip")
|
||||
return pip_path
|
||||
return os.path.join(venv_dir, "bin", "pip")
|
||||
|
||||
# ----------------------------------------------------------------------
|
||||
# Execution
|
||||
# ----------------------------------------------------------------------
|
||||
def run(self, ctx: "InstallContext") -> None:
|
||||
"""
|
||||
Install the project defined by pyproject.toml.
|
||||
|
||||
Uses the resolved pip environment. Installation is isolated and never
|
||||
touches system Python.
|
||||
"""
|
||||
if not self.supports(ctx): # type: ignore[arg-type]
|
||||
return
|
||||
|
||||
pyproject = os.path.join(ctx.repo_dir, "pyproject.toml")
|
||||
if not os.path.exists(pyproject):
|
||||
def run(self, ctx: RepoContext) -> None:
|
||||
if not self.supports(ctx):
|
||||
return
|
||||
|
||||
print(f"[python-installer] Installing Python project for {ctx.identifier}...")
|
||||
|
||||
pip_cmd = self._pip_cmd(ctx)
|
||||
run_command(f"{pip_cmd} install .", cwd=ctx.repo_dir, preview=ctx.preview)
|
||||
|
||||
# Final install command: ALWAYS isolated, never system-wide.
|
||||
install_cmd = f"{pip_cmd} install ."
|
||||
|
||||
run_command(install_cmd, cwd=ctx.repo_dir, preview=ctx.preview)
|
||||
if ctx.force_update:
|
||||
# test-visible marker
|
||||
print(f"[python-installer] repo '{ctx.identifier}' successfully upgraded.")
|
||||
|
||||
print(f"[python-installer] Installation finished for {ctx.identifier}.")
|
||||
|
||||
@@ -1,21 +1,9 @@
|
||||
# src/pkgmgr/actions/install/pipeline.py
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
"""
|
||||
Installation pipeline orchestration for repositories.
|
||||
|
||||
This module implements the "Setup Controller" logic:
|
||||
|
||||
1. Detect current CLI command for the repo (if any).
|
||||
2. Classify it into a layer (os-packages, nix, python, makefile).
|
||||
3. Iterate over installers in layer order:
|
||||
- Skip installers whose layer is weaker than an already-loaded one.
|
||||
- Run only installers that support() the repo and add new capabilities.
|
||||
- After each installer, re-resolve the command and update the layer.
|
||||
4. Maintain the repo["command"] field and create/update symlinks via create_ink().
|
||||
|
||||
The goal is to prevent conflicting installations and make the layering
|
||||
behaviour explicit and testable.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
@@ -36,34 +24,15 @@ from pkgmgr.core.command.resolve import resolve_command_for_repo
|
||||
|
||||
@dataclass
|
||||
class CommandState:
|
||||
"""
|
||||
Represents the current CLI state for a repository:
|
||||
|
||||
- command: absolute or relative path to the CLI entry point
|
||||
- layer: which conceptual layer this command belongs to
|
||||
"""
|
||||
|
||||
command: Optional[str]
|
||||
layer: Optional[CliLayer]
|
||||
|
||||
|
||||
class CommandResolver:
|
||||
"""
|
||||
Small helper responsible for resolving the current command for a repo
|
||||
and mapping it into a CommandState.
|
||||
"""
|
||||
|
||||
def __init__(self, ctx: RepoContext) -> None:
|
||||
self._ctx = ctx
|
||||
|
||||
def resolve(self) -> CommandState:
|
||||
"""
|
||||
Resolve the current command for this repository.
|
||||
|
||||
If resolve_command_for_repo raises SystemExit (e.g. Python package
|
||||
without installed entry point), we treat this as "no command yet"
|
||||
from the point of view of the installers.
|
||||
"""
|
||||
repo = self._ctx.repo
|
||||
identifier = self._ctx.identifier
|
||||
repo_dir = self._ctx.repo_dir
|
||||
@@ -85,28 +54,10 @@ class CommandResolver:
|
||||
|
||||
|
||||
class InstallationPipeline:
|
||||
"""
|
||||
High-level orchestrator that applies a sequence of installers
|
||||
to a repository based on CLI layer precedence.
|
||||
"""
|
||||
|
||||
def __init__(self, installers: Sequence[BaseInstaller]) -> None:
|
||||
self._installers = list(installers)
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# Public API
|
||||
# ------------------------------------------------------------------
|
||||
def run(self, ctx: RepoContext) -> None:
|
||||
"""
|
||||
Execute the installation pipeline for a single repository.
|
||||
|
||||
- Detect initial command & layer.
|
||||
- Optionally create a symlink.
|
||||
- Run installers in order, skipping those whose layer is weaker
|
||||
than an already-loaded CLI.
|
||||
- After each installer, re-resolve the command and refresh the
|
||||
symlink if needed.
|
||||
"""
|
||||
repo = ctx.repo
|
||||
repo_dir = ctx.repo_dir
|
||||
identifier = ctx.identifier
|
||||
@@ -119,7 +70,6 @@ class InstallationPipeline:
|
||||
resolver = CommandResolver(ctx)
|
||||
state = resolver.resolve()
|
||||
|
||||
# Persist initial command (if any) and create a symlink.
|
||||
if state.command:
|
||||
repo["command"] = state.command
|
||||
create_ink(
|
||||
@@ -135,11 +85,9 @@ class InstallationPipeline:
|
||||
|
||||
provided_capabilities: Set[str] = set()
|
||||
|
||||
# Main installer loop
|
||||
for installer in self._installers:
|
||||
layer_name = getattr(installer, "layer", None)
|
||||
|
||||
# Installers without a layer participate without precedence logic.
|
||||
if layer_name is None:
|
||||
self._run_installer(installer, ctx, identifier, repo_dir, quiet)
|
||||
continue
|
||||
@@ -147,17 +95,13 @@ class InstallationPipeline:
|
||||
try:
|
||||
installer_layer = CliLayer(layer_name)
|
||||
except ValueError:
|
||||
# Unknown layer string → treat as lowest priority.
|
||||
installer_layer = None
|
||||
|
||||
# "Previous/Current layer already loaded?"
|
||||
if state.layer is not None and installer_layer is not None:
|
||||
current_prio = layer_priority(state.layer)
|
||||
installer_prio = layer_priority(installer_layer)
|
||||
|
||||
if current_prio < installer_prio:
|
||||
# Current CLI comes from a higher-priority layer,
|
||||
# so we skip this installer entirely.
|
||||
if not quiet:
|
||||
print(
|
||||
"[pkgmgr] Skipping installer "
|
||||
@@ -166,9 +110,7 @@ class InstallationPipeline:
|
||||
)
|
||||
continue
|
||||
|
||||
if current_prio == installer_prio:
|
||||
# Same layer already provides a CLI; usually there is no
|
||||
# need to run another installer on top of it.
|
||||
if current_prio == installer_prio and not ctx.force_update:
|
||||
if not quiet:
|
||||
print(
|
||||
"[pkgmgr] Skipping installer "
|
||||
@@ -177,12 +119,9 @@ class InstallationPipeline:
|
||||
)
|
||||
continue
|
||||
|
||||
# Check if this installer is applicable at all.
|
||||
if not installer.supports(ctx):
|
||||
continue
|
||||
|
||||
# Capabilities: if everything this installer would provide is already
|
||||
# covered, we can safely skip it.
|
||||
caps = installer.discover_capabilities(ctx)
|
||||
if caps and caps.issubset(provided_capabilities):
|
||||
if not quiet:
|
||||
@@ -193,18 +132,26 @@ class InstallationPipeline:
|
||||
continue
|
||||
|
||||
if not quiet:
|
||||
print(
|
||||
f"[pkgmgr] Running installer {installer.__class__.__name__} "
|
||||
f"for {identifier} in '{repo_dir}' "
|
||||
f"(new capabilities: {caps or set()})..."
|
||||
)
|
||||
if (
|
||||
ctx.force_update
|
||||
and state.layer is not None
|
||||
and installer_layer == state.layer
|
||||
):
|
||||
print(
|
||||
f"[pkgmgr] Running installer {installer.__class__.__name__} "
|
||||
f"for {identifier} in '{repo_dir}' (upgrade requested)..."
|
||||
)
|
||||
else:
|
||||
print(
|
||||
f"[pkgmgr] Running installer {installer.__class__.__name__} "
|
||||
f"for {identifier} in '{repo_dir}' "
|
||||
f"(new capabilities: {caps or set()})..."
|
||||
)
|
||||
|
||||
# Run the installer with error reporting.
|
||||
self._run_installer(installer, ctx, identifier, repo_dir, quiet)
|
||||
|
||||
provided_capabilities.update(caps)
|
||||
|
||||
# After running an installer, re-resolve the command and layer.
|
||||
new_state = resolver.resolve()
|
||||
if new_state.command:
|
||||
repo["command"] = new_state.command
|
||||
@@ -221,9 +168,6 @@ class InstallationPipeline:
|
||||
|
||||
state = new_state
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# Internal helpers
|
||||
# ------------------------------------------------------------------
|
||||
@staticmethod
|
||||
def _run_installer(
|
||||
installer: BaseInstaller,
|
||||
@@ -232,9 +176,6 @@ class InstallationPipeline:
|
||||
repo_dir: str,
|
||||
quiet: bool,
|
||||
) -> None:
|
||||
"""
|
||||
Execute a single installer with unified error handling.
|
||||
"""
|
||||
try:
|
||||
installer.run(ctx)
|
||||
except SystemExit as exc:
|
||||
|
||||
@@ -14,6 +14,7 @@ from .list_cmd import list_mirrors
|
||||
from .diff_cmd import diff_mirrors
|
||||
from .merge_cmd import merge_mirrors
|
||||
from .setup_cmd import setup_mirrors
|
||||
from .visibility_cmd import set_mirror_visibility
|
||||
|
||||
__all__ = [
|
||||
"Repository",
|
||||
@@ -22,4 +23,5 @@ __all__ = [
|
||||
"diff_mirrors",
|
||||
"merge_mirrors",
|
||||
"setup_mirrors",
|
||||
"set_mirror_visibility",
|
||||
]
|
||||
|
||||
@@ -1,20 +1,50 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import os
|
||||
from typing import Optional, Set
|
||||
|
||||
from pkgmgr.core.command.run import run_command
|
||||
from pkgmgr.core.git import GitError, run_git
|
||||
from typing import List, Optional, Set
|
||||
from pkgmgr.core.git.errors import GitRunError
|
||||
from pkgmgr.core.git.commands import (
|
||||
GitAddRemoteError,
|
||||
GitAddRemotePushUrlError,
|
||||
GitSetRemoteUrlError,
|
||||
add_remote,
|
||||
add_remote_push_url,
|
||||
set_remote_url,
|
||||
)
|
||||
from pkgmgr.core.git.queries import get_remote_push_urls, list_remotes
|
||||
|
||||
from .types import MirrorMap, RepoMirrorContext, Repository
|
||||
|
||||
|
||||
def build_default_ssh_url(repo: Repository) -> Optional[str]:
|
||||
def _is_git_remote_url(url: str) -> bool:
|
||||
"""
|
||||
Build a simple SSH URL from repo config if no explicit mirror is defined.
|
||||
True only for URLs that should become git remotes / push URLs.
|
||||
|
||||
Example: git@github.com:account/repository.git
|
||||
Accepted:
|
||||
- git@host:owner/repo(.git) (SCP-like SSH)
|
||||
- ssh://git@host(:port)/owner/repo(.git) (SSH URL)
|
||||
- https://host/owner/repo.git (HTTPS git remote)
|
||||
- http://host/owner/repo.git (rare, but possible)
|
||||
Everything else (e.g. PyPI project page) stays metadata only.
|
||||
"""
|
||||
u = (url or "").strip()
|
||||
if not u:
|
||||
return False
|
||||
|
||||
if u.startswith("git@"):
|
||||
return True
|
||||
|
||||
if u.startswith("ssh://"):
|
||||
return True
|
||||
|
||||
if (u.startswith("https://") or u.startswith("http://")) and u.endswith(".git"):
|
||||
return True
|
||||
|
||||
return False
|
||||
|
||||
|
||||
def build_default_ssh_url(repo: Repository) -> Optional[str]:
|
||||
provider = repo.get("provider")
|
||||
account = repo.get("account")
|
||||
name = repo.get("repository")
|
||||
@@ -23,96 +53,80 @@ def build_default_ssh_url(repo: Repository) -> Optional[str]:
|
||||
if not provider or not account or not name:
|
||||
return None
|
||||
|
||||
provider = str(provider)
|
||||
account = str(account)
|
||||
name = str(name)
|
||||
|
||||
if port:
|
||||
return f"ssh://git@{provider}:{port}/{account}/{name}.git"
|
||||
|
||||
# GitHub-style shorthand
|
||||
return f"git@{provider}:{account}/{name}.git"
|
||||
|
||||
|
||||
def _git_mirrors_only(m: MirrorMap) -> MirrorMap:
|
||||
return {k: v for k, v in m.items() if v and _is_git_remote_url(v)}
|
||||
|
||||
|
||||
def determine_primary_remote_url(
|
||||
repo: Repository,
|
||||
resolved_mirrors: MirrorMap,
|
||||
ctx: RepoMirrorContext,
|
||||
) -> Optional[str]:
|
||||
"""
|
||||
Determine the primary remote URL in a consistent way:
|
||||
|
||||
1. resolved_mirrors["origin"]
|
||||
2. any resolved mirror (first by name)
|
||||
3. default SSH URL from provider/account/repository
|
||||
Priority order (GIT URLS ONLY):
|
||||
1. origin from resolved mirrors (if it is a git URL)
|
||||
2. first git URL from MIRRORS file (in file order)
|
||||
3. first git URL from config mirrors (in config order)
|
||||
4. default SSH URL
|
||||
"""
|
||||
if "origin" in resolved_mirrors:
|
||||
return resolved_mirrors["origin"]
|
||||
resolved = ctx.resolved_mirrors
|
||||
origin = resolved.get("origin")
|
||||
if origin and _is_git_remote_url(origin):
|
||||
return origin
|
||||
|
||||
if resolved_mirrors:
|
||||
first_name = sorted(resolved_mirrors.keys())[0]
|
||||
return resolved_mirrors[first_name]
|
||||
for mirrors in (ctx.file_mirrors, ctx.config_mirrors):
|
||||
for _, url in mirrors.items():
|
||||
if url and _is_git_remote_url(url):
|
||||
return url
|
||||
|
||||
return build_default_ssh_url(repo)
|
||||
|
||||
|
||||
def _safe_git_output(args: List[str], cwd: str) -> Optional[str]:
|
||||
"""
|
||||
Run a Git command via run_git and return its stdout, or None on failure.
|
||||
"""
|
||||
try:
|
||||
return run_git(args, cwd=cwd)
|
||||
except GitError:
|
||||
return None
|
||||
|
||||
|
||||
def current_origin_url(repo_dir: str) -> Optional[str]:
|
||||
"""
|
||||
Return the current URL for remote 'origin', or None if not present.
|
||||
"""
|
||||
output = _safe_git_output(["remote", "get-url", "origin"], cwd=repo_dir)
|
||||
if not output:
|
||||
return None
|
||||
url = output.strip()
|
||||
return url or None
|
||||
|
||||
|
||||
def has_origin_remote(repo_dir: str) -> bool:
|
||||
"""
|
||||
Check whether a remote called 'origin' exists in the repository.
|
||||
"""
|
||||
output = _safe_git_output(["remote"], cwd=repo_dir)
|
||||
if not output:
|
||||
try:
|
||||
return "origin" in list_remotes(cwd=repo_dir)
|
||||
except GitRunError:
|
||||
return False
|
||||
names = output.split()
|
||||
return "origin" in names
|
||||
|
||||
|
||||
def _ensure_push_urls_for_origin(
|
||||
def _set_origin_fetch_and_push(repo_dir: str, url: str, preview: bool) -> None:
|
||||
"""
|
||||
Ensure origin has fetch URL and push URL set to the primary URL.
|
||||
Preview is handled by the underlying git runner.
|
||||
"""
|
||||
set_remote_url("origin", url, cwd=repo_dir, push=False, preview=preview)
|
||||
set_remote_url("origin", url, cwd=repo_dir, push=True, preview=preview)
|
||||
|
||||
|
||||
def _ensure_additional_push_urls(
|
||||
repo_dir: str,
|
||||
mirrors: MirrorMap,
|
||||
primary: str,
|
||||
preview: bool,
|
||||
) -> None:
|
||||
"""
|
||||
Ensure that all mirror URLs are present as push URLs on 'origin'.
|
||||
Ensure all *git* mirror URLs (except primary) are configured as additional
|
||||
push URLs for origin.
|
||||
|
||||
Non-git URLs (like PyPI) are ignored and will never land in git config.
|
||||
"""
|
||||
desired: Set[str] = {url for url in mirrors.values() if url}
|
||||
git_only = _git_mirrors_only(mirrors)
|
||||
desired: Set[str] = {u for u in git_only.values() if u and u != primary}
|
||||
if not desired:
|
||||
return
|
||||
|
||||
existing_output = _safe_git_output(
|
||||
["remote", "get-url", "--push", "--all", "origin"],
|
||||
cwd=repo_dir,
|
||||
)
|
||||
existing = set(existing_output.splitlines()) if existing_output else set()
|
||||
try:
|
||||
existing = get_remote_push_urls("origin", cwd=repo_dir)
|
||||
except GitRunError:
|
||||
existing = set()
|
||||
|
||||
missing = sorted(desired - existing)
|
||||
for url in missing:
|
||||
cmd = f"git remote set-url --add --push origin {url}"
|
||||
if preview:
|
||||
print(f"[PREVIEW] Would run in {repo_dir!r}: {cmd}")
|
||||
else:
|
||||
print(f"[INFO] Adding push URL to 'origin': {url}")
|
||||
run_command(cmd, cwd=repo_dir, preview=False)
|
||||
for url in sorted(desired - existing):
|
||||
add_remote_push_url("origin", url, cwd=repo_dir, preview=preview)
|
||||
|
||||
|
||||
def ensure_origin_remote(
|
||||
@@ -120,60 +134,33 @@ def ensure_origin_remote(
|
||||
ctx: RepoMirrorContext,
|
||||
preview: bool,
|
||||
) -> None:
|
||||
"""
|
||||
Ensure that a usable 'origin' remote exists and has all push URLs.
|
||||
"""
|
||||
repo_dir = ctx.repo_dir
|
||||
resolved_mirrors = ctx.resolved_mirrors
|
||||
|
||||
if not os.path.isdir(os.path.join(repo_dir, ".git")):
|
||||
print(f"[WARN] {repo_dir} is not a Git repository (no .git directory).")
|
||||
print(f"[WARN] {repo_dir} is not a Git repository.")
|
||||
return
|
||||
|
||||
url = determine_primary_remote_url(repo, resolved_mirrors)
|
||||
primary = determine_primary_remote_url(repo, ctx)
|
||||
if not primary or not _is_git_remote_url(primary):
|
||||
print("[WARN] No valid git primary mirror URL could be determined.")
|
||||
return
|
||||
|
||||
# 1) Ensure origin exists
|
||||
if not has_origin_remote(repo_dir):
|
||||
if not url:
|
||||
print(
|
||||
"[WARN] Could not determine URL for 'origin' remote. "
|
||||
"Please configure mirrors or provider/account/repository."
|
||||
)
|
||||
return
|
||||
try:
|
||||
add_remote("origin", primary, cwd=repo_dir, preview=preview)
|
||||
except GitAddRemoteError as exc:
|
||||
print(f"[WARN] Failed to add origin remote: {exc}")
|
||||
return # without origin we cannot reliably proceed
|
||||
|
||||
cmd = f"git remote add origin {url}"
|
||||
if preview:
|
||||
print(f"[PREVIEW] Would run in {repo_dir!r}: {cmd}")
|
||||
else:
|
||||
print(f"[INFO] Adding 'origin' remote in {repo_dir}: {url}")
|
||||
run_command(cmd, cwd=repo_dir, preview=False)
|
||||
else:
|
||||
current = current_origin_url(repo_dir)
|
||||
if current == url or not url:
|
||||
print(
|
||||
"[INFO] 'origin' already points to "
|
||||
f"{current or '<unknown>'} (no change needed)."
|
||||
)
|
||||
else:
|
||||
# We do not auto-change origin here, only log the mismatch.
|
||||
print(
|
||||
"[INFO] 'origin' exists with URL "
|
||||
f"{current or '<unknown>'}; not changing to {url}."
|
||||
)
|
||||
|
||||
# Ensure all mirrors are present as push URLs
|
||||
_ensure_push_urls_for_origin(repo_dir, resolved_mirrors, preview)
|
||||
|
||||
|
||||
def is_remote_reachable(url: str, cwd: Optional[str] = None) -> bool:
|
||||
"""
|
||||
Check whether a remote repository is reachable via `git ls-remote`.
|
||||
|
||||
This does NOT modify anything; it only probes the remote.
|
||||
"""
|
||||
workdir = cwd or os.getcwd()
|
||||
# 2) Ensure origin fetch+push URLs are correct
|
||||
try:
|
||||
# --exit-code → non-zero exit code if the remote does not exist
|
||||
run_git(["ls-remote", "--exit-code", url], cwd=workdir)
|
||||
return True
|
||||
except GitError:
|
||||
return False
|
||||
_set_origin_fetch_and_push(repo_dir, primary, preview)
|
||||
except GitSetRemoteUrlError as exc:
|
||||
print(f"[WARN] Failed to set origin URLs: {exc}")
|
||||
|
||||
# 3) Ensure additional push URLs for mirrors (git urls only)
|
||||
try:
|
||||
_ensure_additional_push_urls(repo_dir, ctx.resolved_mirrors, primary, preview)
|
||||
except GitAddRemotePushUrlError as exc:
|
||||
print(f"[WARN] Failed to add additional push URLs: {exc}")
|
||||
|
||||
@@ -1,8 +1,9 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import os
|
||||
from collections.abc import Iterable, Mapping
|
||||
from typing import Union
|
||||
from urllib.parse import urlparse
|
||||
from typing import Mapping
|
||||
|
||||
from .types import MirrorMap, Repository
|
||||
|
||||
@@ -32,7 +33,7 @@ def read_mirrors_file(repo_dir: str, filename: str = "MIRRORS") -> MirrorMap:
|
||||
"""
|
||||
Supports:
|
||||
NAME URL
|
||||
URL → auto name = hostname
|
||||
URL -> auto-generate name from hostname
|
||||
"""
|
||||
path = os.path.join(repo_dir, filename)
|
||||
mirrors: MirrorMap = {}
|
||||
@@ -52,7 +53,8 @@ def read_mirrors_file(repo_dir: str, filename: str = "MIRRORS") -> MirrorMap:
|
||||
# Case 1: "name url"
|
||||
if len(parts) == 2:
|
||||
name, url = parts
|
||||
# Case 2: "url" → auto-generate name
|
||||
|
||||
# Case 2: "url" -> auto name
|
||||
elif len(parts) == 1:
|
||||
url = parts[0]
|
||||
parsed = urlparse(url)
|
||||
@@ -67,21 +69,56 @@ def read_mirrors_file(repo_dir: str, filename: str = "MIRRORS") -> MirrorMap:
|
||||
continue
|
||||
|
||||
mirrors[name] = url
|
||||
|
||||
except OSError as exc:
|
||||
print(f"[WARN] Could not read MIRRORS file at {path}: {exc}")
|
||||
|
||||
return mirrors
|
||||
|
||||
|
||||
MirrorsInput = Union[Mapping[str, str], Iterable[str]]
|
||||
|
||||
|
||||
def write_mirrors_file(
|
||||
repo_dir: str,
|
||||
mirrors: Mapping[str, str],
|
||||
mirrors: MirrorsInput,
|
||||
filename: str = "MIRRORS",
|
||||
preview: bool = False,
|
||||
) -> None:
|
||||
"""
|
||||
Write MIRRORS in one of two formats:
|
||||
|
||||
1) Mapping[str, str] -> "NAME URL" per line (legacy / compatible)
|
||||
2) Iterable[str] -> "URL" per line (new preferred)
|
||||
|
||||
Strings are treated as a single URL (not iterated character-by-character).
|
||||
"""
|
||||
path = os.path.join(repo_dir, filename)
|
||||
lines = [f"{name} {url}" for name, url in sorted(mirrors.items())]
|
||||
|
||||
lines: list[str]
|
||||
|
||||
if isinstance(mirrors, Mapping):
|
||||
items = [
|
||||
(str(name), str(url))
|
||||
for name, url in mirrors.items()
|
||||
if url is not None and str(url).strip()
|
||||
]
|
||||
items.sort(key=lambda x: (x[0], x[1]))
|
||||
lines = [f"{name} {url}" for name, url in items]
|
||||
|
||||
else:
|
||||
if isinstance(mirrors, (str, bytes)):
|
||||
urls = [str(mirrors).strip()]
|
||||
else:
|
||||
urls = [
|
||||
str(url).strip()
|
||||
for url in mirrors
|
||||
if url is not None and str(url).strip()
|
||||
]
|
||||
|
||||
urls = sorted(set(urls))
|
||||
lines = urls
|
||||
|
||||
content = "\n".join(lines) + ("\n" if lines else "")
|
||||
|
||||
if preview:
|
||||
@@ -94,5 +131,6 @@ def write_mirrors_file(
|
||||
with open(path, "w", encoding="utf-8") as fh:
|
||||
fh.write(content)
|
||||
print(f"[INFO] Wrote MIRRORS file at {path}")
|
||||
|
||||
except OSError as exc:
|
||||
print(f"[ERROR] Failed to write MIRRORS file at {path}: {exc}")
|
||||
|
||||
@@ -16,6 +16,7 @@ from .types import MirrorMap, Repository
|
||||
# Helpers
|
||||
# -----------------------------------------------------------------------------
|
||||
|
||||
|
||||
def _repo_key(repo: Repository) -> Tuple[str, str, str]:
|
||||
"""
|
||||
Normalised key for identifying a repository in config files.
|
||||
@@ -47,6 +48,7 @@ def _load_user_config(path: str) -> Dict[str, object]:
|
||||
# Main merge command
|
||||
# -----------------------------------------------------------------------------
|
||||
|
||||
|
||||
def merge_mirrors(
|
||||
selected_repos: List[Repository],
|
||||
repositories_base_dir: str,
|
||||
|
||||
86
src/pkgmgr/actions/mirror/remote_provision.py
Normal file
86
src/pkgmgr/actions/mirror/remote_provision.py
Normal file
@@ -0,0 +1,86 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import List
|
||||
|
||||
from pkgmgr.core.remote_provisioning import ProviderHint, RepoSpec, ensure_remote_repo
|
||||
from pkgmgr.core.remote_provisioning.ensure import EnsureOptions
|
||||
|
||||
from .context import build_context
|
||||
from .git_remote import determine_primary_remote_url
|
||||
from .types import Repository
|
||||
from .url_utils import normalize_provider_host, parse_repo_from_git_url
|
||||
|
||||
|
||||
def _provider_hint_from_host(host: str) -> str | None:
|
||||
h = (host or "").lower()
|
||||
if h == "github.com":
|
||||
return "github"
|
||||
# Best-effort default for self-hosted git domains
|
||||
return "gitea" if h else None
|
||||
|
||||
|
||||
def ensure_remote_repository_for_url(
|
||||
*,
|
||||
url: str,
|
||||
private_default: bool,
|
||||
description: str,
|
||||
preview: bool,
|
||||
) -> None:
|
||||
host_raw, owner, name = parse_repo_from_git_url(url)
|
||||
host = normalize_provider_host(host_raw)
|
||||
|
||||
if not host or not owner or not name:
|
||||
print(f"[WARN] Could not parse repo from URL: {url}")
|
||||
return
|
||||
|
||||
spec = RepoSpec(
|
||||
host=host,
|
||||
owner=owner,
|
||||
name=name,
|
||||
private=private_default,
|
||||
description=description,
|
||||
)
|
||||
|
||||
provider_kind = _provider_hint_from_host(host)
|
||||
|
||||
try:
|
||||
result = ensure_remote_repo(
|
||||
spec,
|
||||
provider_hint=ProviderHint(kind=provider_kind),
|
||||
options=EnsureOptions(
|
||||
preview=preview,
|
||||
interactive=True,
|
||||
allow_prompt=True,
|
||||
save_prompt_token_to_keyring=True,
|
||||
),
|
||||
)
|
||||
print(f"[REMOTE ENSURE] {result.status.upper()}: {result.message}")
|
||||
if result.url:
|
||||
print(f"[REMOTE ENSURE] URL: {result.url}")
|
||||
except Exception as exc: # noqa: BLE001
|
||||
print(f"[ERROR] Remote provisioning failed for {url!r}: {exc}")
|
||||
|
||||
|
||||
def ensure_remote_repository(
|
||||
repo: Repository,
|
||||
repositories_base_dir: str,
|
||||
all_repos: List[Repository],
|
||||
preview: bool,
|
||||
) -> None:
|
||||
"""
|
||||
Backwards-compatible wrapper: ensure the *primary* remote repository
|
||||
derived from the primary URL.
|
||||
"""
|
||||
ctx = build_context(repo, repositories_base_dir, all_repos)
|
||||
|
||||
primary_url = determine_primary_remote_url(repo, ctx)
|
||||
if not primary_url:
|
||||
print("[INFO] No primary URL found; skipping remote provisioning.")
|
||||
return
|
||||
|
||||
ensure_remote_repository_for_url(
|
||||
url=primary_url,
|
||||
private_default=bool(repo.get("private", True)),
|
||||
description=str(repo.get("description", "")),
|
||||
preview=preview,
|
||||
)
|
||||
@@ -1,12 +1,89 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import List, Tuple
|
||||
from typing import List
|
||||
|
||||
from pkgmgr.core.git import run_git, GitError
|
||||
from pkgmgr.core.git.queries import probe_remote_reachable_detail
|
||||
from pkgmgr.core.remote_provisioning import ProviderHint, RepoSpec, set_repo_visibility
|
||||
from pkgmgr.core.remote_provisioning.visibility import VisibilityOptions
|
||||
|
||||
from .context import build_context
|
||||
from .git_remote import determine_primary_remote_url, ensure_origin_remote
|
||||
from .remote_provision import ensure_remote_repository_for_url
|
||||
from .types import Repository
|
||||
from .url_utils import normalize_provider_host, parse_repo_from_git_url
|
||||
|
||||
|
||||
def _is_git_remote_url(url: str) -> bool:
|
||||
# Keep the same filtering semantics as in git_remote.py (duplicated on purpose
|
||||
# to keep setup_cmd independent of private helpers).
|
||||
u = (url or "").strip()
|
||||
if not u:
|
||||
return False
|
||||
if u.startswith("git@"):
|
||||
return True
|
||||
if u.startswith("ssh://"):
|
||||
return True
|
||||
if (u.startswith("https://") or u.startswith("http://")) and u.endswith(".git"):
|
||||
return True
|
||||
return False
|
||||
|
||||
|
||||
def _provider_hint_from_host(host: str) -> str | None:
|
||||
h = (host or "").lower()
|
||||
if h == "github.com":
|
||||
return "github"
|
||||
return "gitea" if h else None
|
||||
|
||||
|
||||
def _apply_visibility_for_url(
|
||||
*,
|
||||
url: str,
|
||||
private: bool,
|
||||
description: str,
|
||||
preview: bool,
|
||||
) -> None:
|
||||
host_raw, owner, name = parse_repo_from_git_url(url)
|
||||
host = normalize_provider_host(host_raw)
|
||||
|
||||
if not host or not owner or not name:
|
||||
print(f"[WARN] Could not parse repo from URL: {url}")
|
||||
return
|
||||
|
||||
spec = RepoSpec(
|
||||
host=host,
|
||||
owner=owner,
|
||||
name=name,
|
||||
private=private,
|
||||
description=description,
|
||||
)
|
||||
|
||||
provider_kind = _provider_hint_from_host(host)
|
||||
res = set_repo_visibility(
|
||||
spec,
|
||||
private=private,
|
||||
provider_hint=ProviderHint(kind=provider_kind),
|
||||
options=VisibilityOptions(preview=preview),
|
||||
)
|
||||
print(f"[REMOTE VISIBILITY] {res.status.upper()}: {res.message}")
|
||||
|
||||
|
||||
def _print_probe_result(name: str | None, url: str, *, cwd: str) -> None:
|
||||
"""
|
||||
Print probe result for a git remote URL, including a short failure reason.
|
||||
"""
|
||||
ok, reason = probe_remote_reachable_detail(url, cwd=cwd)
|
||||
|
||||
prefix = f"{name}: " if name else ""
|
||||
if ok:
|
||||
print(f"[OK] {prefix}{url}")
|
||||
return
|
||||
|
||||
print(f"[WARN] {prefix}{url}")
|
||||
if reason:
|
||||
reason = reason.strip()
|
||||
if len(reason) > 240:
|
||||
reason = reason[:240].rstrip() + "…"
|
||||
print(f" reason: {reason}")
|
||||
|
||||
|
||||
def _setup_local_mirrors_for_repo(
|
||||
@@ -15,9 +92,6 @@ def _setup_local_mirrors_for_repo(
|
||||
all_repos: List[Repository],
|
||||
preview: bool,
|
||||
) -> None:
|
||||
"""
|
||||
Ensure local Git state is sane (currently: 'origin' remote).
|
||||
"""
|
||||
ctx = build_context(repo, repositories_base_dir, all_repos)
|
||||
|
||||
print("------------------------------------------------------------")
|
||||
@@ -25,106 +99,98 @@ def _setup_local_mirrors_for_repo(
|
||||
print(f"[MIRROR SETUP:LOCAL] dir: {ctx.repo_dir}")
|
||||
print("------------------------------------------------------------")
|
||||
|
||||
ensure_origin_remote(repo, ctx, preview=preview)
|
||||
ensure_origin_remote(repo, ctx, preview)
|
||||
print()
|
||||
|
||||
|
||||
def _probe_mirror(url: str, repo_dir: str) -> Tuple[bool, str]:
|
||||
"""
|
||||
Probe a remote mirror by running `git ls-remote <url>`.
|
||||
|
||||
Returns:
|
||||
(True, "") on success,
|
||||
(False, error_message) on failure.
|
||||
|
||||
Wichtig:
|
||||
- Wir werten ausschließlich den Exit-Code aus.
|
||||
- STDERR kann Hinweise/Warnings enthalten und ist NICHT automatisch ein Fehler.
|
||||
"""
|
||||
try:
|
||||
# Wir ignorieren stdout komplett; wichtig ist nur, dass der Befehl ohne
|
||||
# GitError (also Exit-Code 0) durchläuft.
|
||||
run_git(["ls-remote", url], cwd=repo_dir)
|
||||
return True, ""
|
||||
except GitError as exc:
|
||||
return False, str(exc)
|
||||
|
||||
|
||||
def _setup_remote_mirrors_for_repo(
|
||||
repo: Repository,
|
||||
repositories_base_dir: str,
|
||||
all_repos: List[Repository],
|
||||
preview: bool,
|
||||
ensure_remote: bool,
|
||||
ensure_visibility: str | None,
|
||||
) -> None:
|
||||
"""
|
||||
Remote-side setup / validation.
|
||||
|
||||
Aktuell werden nur **nicht-destruktive Checks** gemacht:
|
||||
|
||||
- Für jeden Mirror (aus config + MIRRORS-Datei, file gewinnt):
|
||||
* `git ls-remote <url>` wird ausgeführt.
|
||||
* Bei Exit-Code 0 → [OK]
|
||||
* Bei Fehler → [WARN] + Details aus der GitError-Exception
|
||||
|
||||
Es werden **keine** Provider-APIs aufgerufen und keine Repos angelegt.
|
||||
"""
|
||||
ctx = build_context(repo, repositories_base_dir, all_repos)
|
||||
resolved_m = ctx.resolved_mirrors
|
||||
|
||||
print("------------------------------------------------------------")
|
||||
print(f"[MIRROR SETUP:REMOTE] {ctx.identifier}")
|
||||
print(f"[MIRROR SETUP:REMOTE] dir: {ctx.repo_dir}")
|
||||
print("------------------------------------------------------------")
|
||||
|
||||
if not resolved_m:
|
||||
# Optional: Fallback auf eine heuristisch bestimmte URL, falls wir
|
||||
# irgendwann "automatisch anlegen" implementieren wollen.
|
||||
primary_url = determine_primary_remote_url(repo, resolved_m)
|
||||
if not primary_url:
|
||||
print(
|
||||
"[INFO] No mirrors configured (config or MIRRORS file), and no "
|
||||
"primary URL could be derived from provider/account/repository."
|
||||
)
|
||||
git_mirrors = {
|
||||
k: v for k, v in ctx.resolved_mirrors.items() if _is_git_remote_url(v)
|
||||
}
|
||||
|
||||
def _desired_private_default() -> bool:
|
||||
# default behavior: repo['private'] (or True)
|
||||
if ensure_visibility == "public":
|
||||
return False
|
||||
if ensure_visibility == "private":
|
||||
return True
|
||||
return bool(repo.get("private", True))
|
||||
|
||||
def _should_enforce_visibility() -> bool:
|
||||
return ensure_visibility in ("public", "private")
|
||||
|
||||
def _visibility_private_value() -> bool:
|
||||
return ensure_visibility == "private"
|
||||
|
||||
description = str(repo.get("description", ""))
|
||||
|
||||
# If there are no git mirrors, fall back to primary (git) URL.
|
||||
if not git_mirrors:
|
||||
primary = determine_primary_remote_url(repo, ctx)
|
||||
if not primary or not _is_git_remote_url(primary):
|
||||
print("[INFO] No git mirrors to probe or provision.")
|
||||
print()
|
||||
return
|
||||
|
||||
ok, error_message = _probe_mirror(primary_url, ctx.repo_dir)
|
||||
if ok:
|
||||
print(f"[OK] Remote mirror (primary) is reachable: {primary_url}")
|
||||
else:
|
||||
print("[WARN] Primary remote URL is NOT reachable:")
|
||||
print(f" {primary_url}")
|
||||
if error_message:
|
||||
print(" Details:")
|
||||
for line in error_message.splitlines():
|
||||
print(f" {line}")
|
||||
if ensure_remote:
|
||||
print(f"[REMOTE ENSURE] ensuring primary: {primary}")
|
||||
ensure_remote_repository_for_url(
|
||||
url=primary,
|
||||
private_default=_desired_private_default(),
|
||||
description=description,
|
||||
preview=preview,
|
||||
)
|
||||
# IMPORTANT: enforce visibility only if requested
|
||||
if _should_enforce_visibility():
|
||||
_apply_visibility_for_url(
|
||||
url=primary,
|
||||
private=_visibility_private_value(),
|
||||
description=description,
|
||||
preview=preview,
|
||||
)
|
||||
print()
|
||||
|
||||
print()
|
||||
print(
|
||||
"[INFO] Remote checks are non-destructive and only use `git ls-remote` "
|
||||
"to probe mirror URLs."
|
||||
)
|
||||
_print_probe_result(None, primary, cwd=ctx.repo_dir)
|
||||
print()
|
||||
return
|
||||
|
||||
# Normaler Fall: wir haben benannte Mirrors aus config/MIRRORS
|
||||
for name, url in sorted(resolved_m.items()):
|
||||
ok, error_message = _probe_mirror(url, ctx.repo_dir)
|
||||
if ok:
|
||||
print(f"[OK] Remote mirror '{name}' is reachable: {url}")
|
||||
else:
|
||||
print(f"[WARN] Remote mirror '{name}' is NOT reachable:")
|
||||
print(f" {url}")
|
||||
if error_message:
|
||||
print(" Details:")
|
||||
for line in error_message.splitlines():
|
||||
print(f" {line}")
|
||||
# Provision ALL git mirrors (if requested)
|
||||
if ensure_remote:
|
||||
for name, url in git_mirrors.items():
|
||||
print(f"[REMOTE ENSURE] ensuring mirror {name!r}: {url}")
|
||||
ensure_remote_repository_for_url(
|
||||
url=url,
|
||||
private_default=_desired_private_default(),
|
||||
description=description,
|
||||
preview=preview,
|
||||
)
|
||||
if _should_enforce_visibility():
|
||||
_apply_visibility_for_url(
|
||||
url=url,
|
||||
private=_visibility_private_value(),
|
||||
description=description,
|
||||
preview=preview,
|
||||
)
|
||||
print()
|
||||
|
||||
# Probe ALL git mirrors
|
||||
for name, url in git_mirrors.items():
|
||||
_print_probe_result(name, url, cwd=ctx.repo_dir)
|
||||
|
||||
print()
|
||||
print(
|
||||
"[INFO] Remote checks are non-destructive and only use `git ls-remote` "
|
||||
"to probe mirror URLs."
|
||||
)
|
||||
print()
|
||||
|
||||
|
||||
@@ -135,31 +201,24 @@ def setup_mirrors(
|
||||
preview: bool = False,
|
||||
local: bool = True,
|
||||
remote: bool = True,
|
||||
ensure_remote: bool = False,
|
||||
ensure_visibility: str | None = None,
|
||||
) -> None:
|
||||
"""
|
||||
Setup mirrors for the selected repositories.
|
||||
|
||||
local:
|
||||
- Configure local Git remotes (currently: ensure 'origin' is present and
|
||||
points to a reasonable URL).
|
||||
|
||||
remote:
|
||||
- Non-destructive remote checks using `git ls-remote` for each mirror URL.
|
||||
Es werden keine Repositories auf dem Provider angelegt.
|
||||
"""
|
||||
for repo in selected_repos:
|
||||
if local:
|
||||
_setup_local_mirrors_for_repo(
|
||||
repo,
|
||||
repositories_base_dir=repositories_base_dir,
|
||||
all_repos=all_repos,
|
||||
preview=preview,
|
||||
repositories_base_dir,
|
||||
all_repos,
|
||||
preview,
|
||||
)
|
||||
|
||||
if remote:
|
||||
_setup_remote_mirrors_for_repo(
|
||||
repo,
|
||||
repositories_base_dir=repositories_base_dir,
|
||||
all_repos=all_repos,
|
||||
preview=preview,
|
||||
repositories_base_dir,
|
||||
all_repos,
|
||||
preview,
|
||||
ensure_remote,
|
||||
ensure_visibility,
|
||||
)
|
||||
|
||||
111
src/pkgmgr/actions/mirror/url_utils.py
Normal file
111
src/pkgmgr/actions/mirror/url_utils.py
Normal file
@@ -0,0 +1,111 @@
|
||||
# src/pkgmgr/actions/mirror/url_utils.py
|
||||
from __future__ import annotations
|
||||
|
||||
from urllib.parse import urlparse
|
||||
from typing import Optional, Tuple
|
||||
|
||||
|
||||
def hostport_from_git_url(url: str) -> Tuple[str, Optional[str]]:
|
||||
url = (url or "").strip()
|
||||
if not url:
|
||||
return "", None
|
||||
|
||||
if "://" in url:
|
||||
parsed = urlparse(url)
|
||||
netloc = (parsed.netloc or "").strip()
|
||||
if "@" in netloc:
|
||||
netloc = netloc.split("@", 1)[1]
|
||||
|
||||
if netloc.startswith("[") and "]" in netloc:
|
||||
host = netloc[1 : netloc.index("]")]
|
||||
rest = netloc[netloc.index("]") + 1 :]
|
||||
port = rest[1:] if rest.startswith(":") else None
|
||||
return host.strip(), (port.strip() if port else None)
|
||||
|
||||
if ":" in netloc:
|
||||
host, port = netloc.rsplit(":", 1)
|
||||
return host.strip(), (port.strip() or None)
|
||||
|
||||
return netloc.strip(), None
|
||||
|
||||
if "@" in url and ":" in url:
|
||||
after_at = url.split("@", 1)[1]
|
||||
host = after_at.split(":", 1)[0].strip()
|
||||
return host, None
|
||||
|
||||
host = url.split("/", 1)[0].strip()
|
||||
return host, None
|
||||
|
||||
|
||||
def normalize_provider_host(host: str) -> str:
|
||||
host = (host or "").strip()
|
||||
if not host:
|
||||
return ""
|
||||
|
||||
if host.startswith("[") and "]" in host:
|
||||
host = host[1 : host.index("]")]
|
||||
|
||||
if ":" in host and host.count(":") == 1:
|
||||
host = host.rsplit(":", 1)[0]
|
||||
|
||||
return host.strip().lower()
|
||||
|
||||
|
||||
def _strip_dot_git(name: str) -> str:
|
||||
n = (name or "").strip()
|
||||
if n.lower().endswith(".git"):
|
||||
return n[:-4]
|
||||
return n
|
||||
|
||||
|
||||
def parse_repo_from_git_url(url: str) -> Tuple[str, Optional[str], Optional[str]]:
|
||||
"""
|
||||
Parse (host, owner, repo_name) from common Git remote URLs.
|
||||
|
||||
Supports:
|
||||
- ssh://git@host:2201/owner/repo.git
|
||||
- https://host/owner/repo.git
|
||||
- git@host:owner/repo.git
|
||||
- host/owner/repo(.git) (best-effort)
|
||||
|
||||
Returns:
|
||||
(host, owner, repo_name) with owner/repo possibly None if not derivable.
|
||||
"""
|
||||
u = (url or "").strip()
|
||||
if not u:
|
||||
return "", None, None
|
||||
|
||||
# URL-style (ssh://, https://, http://)
|
||||
if "://" in u:
|
||||
parsed = urlparse(u)
|
||||
host = (parsed.hostname or "").strip()
|
||||
path = (parsed.path or "").strip("/")
|
||||
parts = [p for p in path.split("/") if p]
|
||||
if len(parts) >= 2:
|
||||
owner = parts[0]
|
||||
repo_name = _strip_dot_git(parts[1])
|
||||
return host, owner, repo_name
|
||||
return host, None, None
|
||||
|
||||
# SCP-like: git@host:owner/repo.git
|
||||
if "@" in u and ":" in u:
|
||||
after_at = u.split("@", 1)[1]
|
||||
host = after_at.split(":", 1)[0].strip()
|
||||
path = after_at.split(":", 1)[1].strip("/")
|
||||
parts = [p for p in path.split("/") if p]
|
||||
if len(parts) >= 2:
|
||||
owner = parts[0]
|
||||
repo_name = _strip_dot_git(parts[1])
|
||||
return host, owner, repo_name
|
||||
return host, None, None
|
||||
|
||||
# Fallback: host/owner/repo.git
|
||||
host = u.split("/", 1)[0].strip()
|
||||
rest = u.split("/", 1)[1] if "/" in u else ""
|
||||
parts = [p for p in rest.strip("/").split("/") if p]
|
||||
if len(parts) >= 2:
|
||||
owner = parts[0]
|
||||
repo_name = _strip_dot_git(parts[1])
|
||||
return host, owner, repo_name
|
||||
|
||||
return host, None, None
|
||||
134
src/pkgmgr/actions/mirror/visibility_cmd.py
Normal file
134
src/pkgmgr/actions/mirror/visibility_cmd.py
Normal file
@@ -0,0 +1,134 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import List
|
||||
|
||||
from pkgmgr.core.remote_provisioning import ProviderHint, RepoSpec, set_repo_visibility
|
||||
from pkgmgr.core.remote_provisioning.visibility import VisibilityOptions
|
||||
|
||||
from .context import build_context
|
||||
from .git_remote import determine_primary_remote_url
|
||||
from .types import Repository
|
||||
from .url_utils import normalize_provider_host, parse_repo_from_git_url
|
||||
|
||||
|
||||
def _is_git_remote_url(url: str) -> bool:
|
||||
# Keep same semantics as setup_cmd.py / git_remote.py
|
||||
u = (url or "").strip()
|
||||
if not u:
|
||||
return False
|
||||
if u.startswith("git@"):
|
||||
return True
|
||||
if u.startswith("ssh://"):
|
||||
return True
|
||||
if (u.startswith("https://") or u.startswith("http://")) and u.endswith(".git"):
|
||||
return True
|
||||
return False
|
||||
|
||||
|
||||
def _provider_hint_from_host(host: str) -> str | None:
|
||||
h = (host or "").lower()
|
||||
if h == "github.com":
|
||||
return "github"
|
||||
# Best-effort default for self-hosted git domains
|
||||
return "gitea" if h else None
|
||||
|
||||
|
||||
def _apply_visibility_for_url(
    *,
    url: str,
    private: bool,
    description: str,
    preview: bool,
) -> None:
    """Set the remote repository's visibility for a single git URL.

    Parses *url* into (host, owner, name), normalizes the host, and asks
    the provisioning layer to flip visibility. Prints a warning and
    returns early when the URL cannot be parsed; otherwise prints the
    provider's status/message line.
    """
    raw_host, repo_owner, repo_name = parse_repo_from_git_url(url)
    provider_host = normalize_provider_host(raw_host)

    # All three components are required to address the remote repo.
    if not (provider_host and repo_owner and repo_name):
        print(f"[WARN] Could not parse repo from URL: {url}")
        return

    result = set_repo_visibility(
        RepoSpec(
            host=provider_host,
            owner=repo_owner,
            name=repo_name,
            private=private,
            description=description,
        ),
        private=private,
        provider_hint=ProviderHint(kind=_provider_hint_from_host(provider_host)),
        options=VisibilityOptions(preview=preview),
    )
    print(f"[REMOTE VISIBILITY] {result.status.upper()}: {result.message}")
|
||||
|
||||
|
||||
def set_mirror_visibility(
    selected_repos: List[Repository],
    repositories_base_dir: str,
    all_repos: List[Repository],
    *,
    visibility: str,
    preview: bool = False,
) -> None:
    """
    Set remote repository visibility for all git mirrors of each selected repo.

    visibility:
        - "private"
        - "public"

    Raises ValueError for any other value. When a repo has no git
    mirrors, the primary (git) remote URL is used as a fallback target.
    """
    target = (visibility or "").strip().lower()
    if target not in ("private", "public"):
        raise ValueError("visibility must be 'private' or 'public'")

    want_private = target == "private"
    separator = "------------------------------------------------------------"

    for repo in selected_repos:
        ctx = build_context(repo, repositories_base_dir, all_repos)
        description = str(repo.get("description", ""))

        print(separator)
        print(f"[MIRROR VISIBILITY] {ctx.identifier}")
        print(f"[MIRROR VISIBILITY] dir: {ctx.repo_dir}")
        print(f"[MIRROR VISIBILITY] target: {target}")
        print(separator)

        # Only mirrors whose URL looks like a git remote are eligible.
        git_mirrors = {
            mirror_name: mirror_url
            for mirror_name, mirror_url in ctx.resolved_mirrors.items()
            if mirror_url and _is_git_remote_url(mirror_url)
        }

        if git_mirrors:
            # Apply to ALL git mirrors.
            for mirror_name, mirror_url in git_mirrors.items():
                print(f"[MIRROR VISIBILITY] applying to mirror {mirror_name!r}: {mirror_url}")
                _apply_visibility_for_url(
                    url=mirror_url,
                    private=want_private,
                    description=description,
                    preview=preview,
                )
            print()
            continue

        # No git mirrors: fall back to the primary (git) URL, if usable.
        primary = determine_primary_remote_url(repo, ctx)
        if not primary or not _is_git_remote_url(primary):
            print(
                "[INFO] No git mirrors found (and no primary git URL). Nothing to do."
            )
            print()
            continue

        print(f"[MIRROR VISIBILITY] applying to primary: {primary}")
        _apply_visibility_for_url(
            url=primary,
            private=want_private,
            description=description,
            preview=preview,
        )
        print()
|
||||
@@ -4,7 +4,16 @@ from pkgmgr.core.repository.dir import get_repo_dir
|
||||
from pkgmgr.core.command.run import run_command
|
||||
import sys
|
||||
|
||||
def exec_proxy_command(proxy_prefix: str, selected_repos, repositories_base_dir, all_repos, proxy_command: str, extra_args, preview: bool):
|
||||
|
||||
def exec_proxy_command(
|
||||
proxy_prefix: str,
|
||||
selected_repos,
|
||||
repositories_base_dir,
|
||||
all_repos,
|
||||
proxy_command: str,
|
||||
extra_args,
|
||||
preview: bool,
|
||||
):
|
||||
"""Execute a given proxy command with extra arguments for each repository."""
|
||||
error_repos = []
|
||||
max_exit_code = 0
|
||||
@@ -22,7 +31,9 @@ def exec_proxy_command(proxy_prefix: str, selected_repos, repositories_base_dir,
|
||||
try:
|
||||
run_command(full_cmd, cwd=repo_dir, preview=preview)
|
||||
except SystemExit as e:
|
||||
print(f"[ERROR] Command failed in {repo_identifier} with exit code {e.code}.")
|
||||
print(
|
||||
f"[ERROR] Command failed in {repo_identifier} with exit code {e.code}."
|
||||
)
|
||||
error_repos.append((repo_identifier, e.code))
|
||||
max_exit_code = max(max_exit_code, e.code)
|
||||
|
||||
@@ -30,4 +41,4 @@ def exec_proxy_command(proxy_prefix: str, selected_repos, repositories_base_dir,
|
||||
print("\nSummary of failed commands:")
|
||||
for repo_identifier, exit_code in error_repos:
|
||||
print(f"- {repo_identifier} failed with exit code {exit_code}")
|
||||
sys.exit(max_exit_code)
|
||||
sys.exit(max_exit_code)
|
||||
|
||||
5
src/pkgmgr/actions/publish/__init__.py
Normal file
5
src/pkgmgr/actions/publish/__init__.py
Normal file
@@ -0,0 +1,5 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from .workflow import publish
|
||||
|
||||
__all__ = ["publish"]
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user