Release #69

Workflow file for this run

name: Release
on:
workflow_dispatch:
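# This workflow only runs when triggered manually, e.g. with the GitHub CLI (target branch assumed):
#   gh workflow run Release --ref main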
concurrency:
group: ${{ github.workflow }}-${{ github.ref_name }}
cancel-in-progress: false
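# Only one release per branch runs at a time; a newly triggered run waits for the previous one instead of cancelling it.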
env:
COMMIT_NAME: Monkeynator
COMMIT_EMAIL: monkeynator@enix.io
jobs:
semver:
name: Semantic Version
runs-on: ubuntu-22.04
outputs:
last: ${{ steps.dry-run.outputs.last_release_version }}
published: ${{ steps.dry-run.outputs.new_release_published }}
channel: ${{ steps.dry-run.outputs.new_release_channel }}
version: ${{ steps.dry-run.outputs.new_release_version }}
major: ${{ steps.dry-run.outputs.new_release_major_version }}
minor: ${{ steps.dry-run.outputs.new_release_minor_version }}
patch: ${{ steps.dry-run.outputs.new_release_patch_version }}
notes: ${{ steps.dry-run.outputs.new_release_notes }}
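# The dry-run computes the next version from the commit history without publishing anything;
# the release job later checks its real semantic-release run against these outputs.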
steps:
- name: Checkout Repository
uses: actions/checkout@v4
with:
persist-credentials: false
- name: Run semantic-release (dry-run)
id: dry-run
uses: cycjimmy/semantic-release-action@v4
with:
dry_run: true
env:
GITHUB_TOKEN: ${{ secrets.RELEASE_GITHUB_TOKEN }}
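# The inspection step below imports its header/info/action/assert_equality helpers from the repository's .github/lib module (made importable via that step's PYTHONPATH).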
- name: Inspect semantic-release (dry-run) outcome
shell: python
env:
PYTHONPATH: ${{ github.workspace }}/.github
SR_LAST: ${{ steps.dry-run.outputs.last_release_version }}
SR_PUBLISHED: ${{ steps.dry-run.outputs.new_release_published }}
SR_CHANNEL: ${{ steps.dry-run.outputs.new_release_channel }}
SR_VERSION: ${{ steps.dry-run.outputs.new_release_version }}
SR_MAJOR: ${{ steps.dry-run.outputs.new_release_major_version }}
SR_MINOR: ${{ steps.dry-run.outputs.new_release_minor_version }}
SR_PATCH: ${{ steps.dry-run.outputs.new_release_patch_version }}
SR_NOTES: ${{ steps.dry-run.outputs.new_release_notes }}
run: |
from lib import *
import os
header('semantic-release job outputs')
info('last = {}'.format(os.environ['SR_LAST']))
info('published = {}'.format(os.environ['SR_PUBLISHED']))
info('channel = {}'.format(os.environ['SR_CHANNEL']))
info('version = {}'.format(os.environ['SR_VERSION']))
info('major = {}'.format(os.environ['SR_MAJOR']))
info('minor = {}'.format(os.environ['SR_MINOR']))
info('patch = {}'.format(os.environ['SR_PATCH']))
info('notes ⏎\n{}'.format(os.environ['SR_NOTES']))
header('sanity checks')
action('should be published')
assert_equality((
(os.environ['SR_PUBLISHED'], 'true'),
))
tests:
name: Tests
runs-on: ubuntu-22.04
env:
DOCKER_CLIENT_API_VERSION: '1.43' # 1.45 is not supported by the ubuntu-22.04 GitHub runner image
steps:
- name: Checkout Repository
uses: actions/checkout@v4
- name: Set up Golang
uses: actions/setup-go@v5
with:
go-version-file: go.mod
check-latest: true
- name: Set up chart-testing
uses: helm/chart-testing-action@v2.6.1
- name: Run golangci-lint
uses: golangci/golangci-lint-action@v6
with:
version: latest
args: --timeout 3m --verbose --issues-exit-code=0
- name: Run unit tests
run: |
set -euo pipefail
make test
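# Render the coverage profile (assumed to be written to cover.out by 'make test') as an HTML report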
go tool cover -html=cover.out -o coverage.html
- name: Run hadolint
id: hadolint
uses: hadolint/hadolint-action@v3.1.0
- name: Run chart-testing (lint)
run: |
set -euo pipefail
ct lint \
--charts helm/kube-image-keeper \
--chart-repos bitnami=https://charts.bitnami.com/bitnami \
--validate-maintainers=false --check-version-increment=false
build:
name: Build test image
needs:
- semver
- tests
runs-on: ubuntu-22.04
env:
VERSION: ${{ github.run_id }}
HARBOR_URL: "harbor.enix.io"
HARBOR_REPO: "kube-image-keeper/kube-image-keeper"
GHCR_IMAGE: "ghcr.io/enix/kube-image-keeper"
QUAY_IMAGE: "quay.io/enix/kube-image-keeper"
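# Test images are tagged with the workflow run ID and pushed to the internal Harbor registry only;
# the e2e jobs pull them from there. Public registries are only pushed to by the 'containers' job once the release is published.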
steps:
- name: Checkout Repository
uses: actions/checkout@v4
with:
path: repository
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Log in to the Container registry
uses: docker/login-action@v3
with:
registry: ${{ env.HARBOR_URL }}
username: ${{ secrets.HARBOR_USERNAME }}
password: ${{ secrets.HARBOR_PASSWORD }}
- name: Generate image metadata
id: meta
uses: docker/metadata-action@v5
with:
images: |
${{ env.GHCR_IMAGE }}
${{ github.repository }}
${{ env.QUAY_IMAGE }}
- name: Build container images
uses: docker/build-push-action@v6
with:
context: repository
platforms: linux/amd64,linux/arm64
build-args: |
"VERSION=${{ env.VERSION }}"
"REVISION=${{ github.sha }}"
push: true
labels: ${{ steps.meta.outputs.labels }}
tags: |
${{ env.HARBOR_URL }}/${{ env.HARBOR_REPO }}:${{ env.VERSION }}
- name: Build alpine container images
uses: docker/build-push-action@v6
with:
context: repository
platforms: linux/amd64,linux/arm64
build-args: |
"VERSION=${{ env.VERSION }}"
"REVISION=${{ github.sha }}"
target: alpine
push: true
labels: ${{ steps.meta.outputs.labels }}
tags: |
${{ env.HARBOR_URL }}/${{ env.HARBOR_REPO }}:${{ env.VERSION }}-alpine
e2e_install:
name: Tests e2e on K8s (Fresh install)
needs:
- build
runs-on: ubuntu-22.04
env:
VERSION: ${{ github.run_id }}
HARBOR_IMAGE: "harbor.enix.io/kube-image-keeper/kube-image-keeper"
HARBOR_REGISTRY: "harbor.enix.io"
HARBOR_USERNAME: ${{ secrets.HARBOR_USERNAME }}
HARBOR_PASSWORD: ${{ secrets.HARBOR_PASSWORD }}
strategy:
max-parallel: 6
matrix:
k8sversion: ["v1.24.17", "v1.25.16", "v1.26.14", "v1.27.11", "v1.28.7", "v1.29.2"]
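# The whole e2e suite below runs once per Kubernetes version, each in its own KinD cluster.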
steps:
- name: Checkout Repository
uses: actions/checkout@v4
- name: Setup KinD
uses: helm/kind-action@v1.10.0
with:
node_image: kindest/node:${{ matrix.k8sversion }}
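# cert-manager is installed first; the kube-image-keeper chart presumably depends on it for its webhook certificates.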
- name: Run cert-manager installation
run: |
kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.11.0/cert-manager.yaml
kubectl wait pods -n cert-manager -l app.kubernetes.io/instance=cert-manager --for condition=Ready --timeout=90s
- name: Set up chart-testing
uses: helm/chart-testing-action@v2.6.1
- name: Set up helm
uses: azure/setup-helm@v4
with:
version: '3.9.0'
- name: Run chart-testing (lint)
run: |
set -euo pipefail
ct lint \
--charts helm/kube-image-keeper \
--chart-repos bitnami=https://charts.bitnami.com/bitnami \
--validate-maintainers=false --check-version-increment=false
- name: Run helm (install)
run: |
set -euo pipefail
kubectl create namespace kuik-system
kubectl create secret docker-registry harbor-secret -n kuik-system --docker-server=${{ env.HARBOR_REGISTRY }} \
--docker-username="$HARBOR_USERNAME" --docker-password="$HARBOR_PASSWORD"
helm upgrade --install kube-image-keeper -n kuik-system --create-namespace ./helm/kube-image-keeper \
--set controllers.image.tag=$VERSION --set proxy.image.tag=$VERSION \
--set controllers.image.repository=$HARBOR_IMAGE --set proxy.image.repository=$HARBOR_IMAGE \
--set controllers.imagePullSecrets[0].name=harbor-secret --set proxy.image.imagePullSecrets[0].name=harbor-secret --debug
kubectl wait pods -n kuik-system -l app.kubernetes.io/instance=kube-image-keeper --for condition=Ready --timeout=90s
helm history kube-image-keeper -n kuik-system
- name: Deploy test container
run: |
set -euo pipefail
kubectl create deploy nginx --image=nginx:stable-alpine --replicas=2
kubectl rollout status deploy nginx
kubectl wait deployment nginx --for condition=Available=True --timeout=30s
echo "kubectl get cachedimage"
kubectl get cachedimages
echo "kubectl get repository"
kubectl get repository
- name: Test cachedimage (CRD)
run: |
set -euo pipefail
## Check if our test image is cached
if [ "$(kubectl get cachedimages docker.io-library-nginx-stable-alpine -o json | jq -r '.status.isCached')" == "true" ];
then
if [ "$(kubectl get cachedimages docker.io-library-nginx-stable-alpine -o json | jq -r '.status.usedBy.count')" -eq 2 ];
then
echo "Found cached image used by 2 pods"
else
echo "Error: pods count should be equal 2"
exit 1
fi
else
echo "Error: image cached status is false"
exit 1
fi
- name: Test repository (CRD)
run: |
set -euo pipefail
## Check repository status
if [ "$(kubectl get repository docker.io-library-nginx -o json | jq -r '.status.phase')" == "Ready" ];
then
echo "Found repository"
else
echo "Error: image repository status is not Ready"
exit 1
fi
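# The check below curls the /metrics endpoint of every proxy and controllers pod from a throwaway curl pod, retrying up to 3 times per pod IP.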
- name: Test metrics endpoint
run: |
set -euo pipefail
## Check for kuik's components metrics
for component in proxy controllers
do
echo "Testing $component metrics endpoint"
for ip in $(kubectl get po -l "app.kubernetes.io/component=$component" -n kuik-system -o jsonpath='{range .items[*]}{.status.podIP}{"\n"}{end}')
do
attempts=0
success=false
while [[ $attempts -lt 3 && $success == false ]]
do
# '|| true' keeps 'set -e' from aborting the step, so the retry logic below can handle failed probes
response=$(kubectl run curl-pod --image=curlimages/curl --rm -ti --quiet --restart=Never -- curl -s -o /dev/null -w "%{http_code}\n" http://$ip:8080/metrics || true)
if [[ -z "$response" ]]; then
echo "No HTTP response received from $ip"
elif [[ $response -ge 200 && $response -lt 300 ]]; then
echo "HTTP status code $response is valid for $ip"
success=true
else
echo "HTTP status code $response is not valid for $ip"
fi
attempts=$(( $attempts + 1 ))
sleep 3
done
if [[ $success == false ]]; then
echo "Failed after 3 attempts for $ip"
exit 1
fi
done
done
e2e_upgrade:
name: Tests e2e on K8s (Upgrade)
needs:
- build
- e2e_install
runs-on: ubuntu-22.04
env:
VERSION: ${{ github.run_id }}
HARBOR_IMAGE: "harbor.enix.io/kube-image-keeper/kube-image-keeper"
HARBOR_REGISTRY: "harbor.enix.io"
HARBOR_USERNAME: ${{ secrets.HARBOR_USERNAME }}
HARBOR_PASSWORD: ${{ secrets.HARBOR_PASSWORD }}
strategy:
max-parallel: 6
matrix:
k8sversion: ["v1.24.17", "v1.25.16", "v1.26.14", "v1.27.11", "v1.28.7", "v1.29.2"]
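# Upgrade scenario: install the latest chart published on charts.enix.io first, then upgrade to the chart and images built by this run.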
steps:
- name: Checkout Repository
uses: actions/checkout@v4
- name: Setup KinD
uses: helm/kind-action@v1.10.0
with:
node_image: kindest/node:${{ matrix.k8sversion }}
- name: Run cert-manager installation
run: |
kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.11.0/cert-manager.yaml
kubectl wait pods -n cert-manager -l app.kubernetes.io/instance=cert-manager --for condition=Ready --timeout=30s
- name: Set up chart-testing
uses: helm/chart-testing-action@v2.6.1
- name: Set up helm
uses: azure/setup-helm@v4
with:
version: '3.9.0'
- name: Run chart-testing (lint)
run: |
set -euo pipefail
ct lint \
--charts helm/kube-image-keeper \
--chart-repos bitnami=https://charts.bitnami.com/bitnami \
--validate-maintainers=false --check-version-increment=false
- name: Run helm (install latest release)
run: |
set -euo pipefail
helm repo add enix https://charts.enix.io/
helm repo update
helm upgrade --install kube-image-keeper -n kuik-system --create-namespace enix/kube-image-keeper --debug
kubectl wait pods -n kuik-system -l app.kubernetes.io/instance=kube-image-keeper --for condition=Ready --timeout=30s
kubectl get po -n kuik-system
- name: Run helm (upgrade)
run: |
set -euo pipefail
kubectl create secret docker-registry harbor-secret -n kuik-system --docker-server=${{ env.HARBOR_REGISTRY }} \
--docker-username="$HARBOR_USERNAME" --docker-password="$HARBOR_PASSWORD"
helm upgrade --install kube-image-keeper -n kuik-system --create-namespace ./helm/kube-image-keeper \
--set controllers.image.tag=$VERSION --set proxy.image.tag=$VERSION \
--set controllers.image.repository=$HARBOR_IMAGE --set proxy.image.repository=$HARBOR_IMAGE \
--set controllers.imagePullSecrets[0].name=harbor-secret --set proxy.image.imagePullSecrets[0].name=harbor-secret --wait --debug
kubectl rollout status deploy kube-image-keeper-controllers -n kuik-system
kubectl rollout status ds kube-image-keeper-proxy -n kuik-system
helm history kube-image-keeper -n kuik-system
- name: Deploy test container
run: |
set -euo pipefail
kubectl create deploy nginx --image=nginx:stable-alpine --replicas=2
kubectl rollout status deploy nginx
kubectl wait deployment nginx --for condition=Available=True --timeout=30s
echo "kubectl get cachedimage"
kubectl get cachedimages
echo "kubectl get repository"
kubectl get repository
- name: Test cachedimage (CRD)
run: |
set -euo pipefail
## Check if our test image is cached
if [ "$(kubectl get cachedimages docker.io-library-nginx-stable-alpine -o json | jq -r '.status.isCached')" == "true" ];
then
if [ "$(kubectl get cachedimages docker.io-library-nginx-stable-alpine -o json | jq -r '.status.usedBy.count')" -eq 2 ];
then
echo "Found cached image used by 2 pods"
else
echo "Error: pods count should be equal 2"
exit 1
fi
else
echo "Error: image cached status is false"
exit 1
fi
- name: Test repository (CRD)
run: |
set -euo pipefail
## Check repository status
if [ "$(kubectl get repository docker.io-library-nginx -o json | jq -r '.status.phase')" == "Ready" ];
then
echo "Found repository"
else
echo "Error: image repository status is not Ready"
exit 1
fi
- name: Test metrics endpoint
run: |
set -euo pipefail
## Check for kuik's components metrics
for component in proxy controllers
do
echo "Testing $component metrics endpoint"
for ip in $(kubectl get po -l "app.kubernetes.io/component=$component" -n kuik-system -o jsonpath='{range .items[*]}{.status.podIP}{"\n"}{end}')
do
attempts=0
success=false
while [[ $attempts -lt 3 && $success == false ]]
do
# '|| true' keeps 'set -e' from aborting the step, so the retry logic below can handle failed probes
response=$(kubectl run curl-pod --image=curlimages/curl --rm -ti --quiet --restart=Never -- curl -s -o /dev/null -w "%{http_code}\n" http://$ip:8080/metrics || true)
if [[ -z "$response" ]]; then
echo "No HTTP response received from $ip"
elif [[ $response -ge 200 && $response -lt 300 ]]; then
echo "HTTP status code $response is valid for $ip"
success=true
else
echo "HTTP status code $response is not valid for $ip"
fi
attempts=$(( $attempts + 1 ))
sleep 3
done
if [[ $success == false ]]; then
echo "Failed after 3 attempts for $ip"
exit 1
fi
done
done
release:
name: Release
needs:
- e2e_upgrade
- semver
runs-on: ubuntu-22.04
outputs:
last: ${{ steps.release.outputs.last_release_version }}
published: ${{ steps.release.outputs.new_release_published }}
channel: ${{ steps.release.outputs.new_release_channel }}
version: ${{ steps.release.outputs.new_release_version }}
major: ${{ steps.release.outputs.new_release_major_version }}
minor: ${{ steps.release.outputs.new_release_minor_version }}
patch: ${{ steps.release.outputs.new_release_patch_version }}
notes: ${{ steps.release.outputs.new_release_notes }}
prerelease: ${{ steps.inspect.outputs.prerelease }}
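# This job runs semantic-release for real (publishing the GitHub release), cross-checks the result against the dry-run,
# and derives the 'prerelease' flag from the presence of a '-' in the released version.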
steps:
- name: Checkout Repository
uses: actions/checkout@v4
with:
path: repository
persist-credentials: false
- name: Run semantic-release
id: release
uses: cycjimmy/semantic-release-action@v4
with:
working_directory: repository
env:
GITHUB_TOKEN: ${{ secrets.RELEASE_GITHUB_TOKEN }}
GIT_COMMITTER_NAME: ${{ env.COMMIT_NAME }}
GIT_COMMITTER_EMAIL: ${{ env.COMMIT_EMAIL }}
GIT_AUTHOR_NAME: ${{ env.COMMIT_NAME }}
GIT_AUTHOR_EMAIL: ${{ env.COMMIT_EMAIL }}
- name: Inspect semantic-release outcome
id: inspect
shell: python
env:
PYTHONPATH: ${{ github.workspace }}/repository/.github
SRDRY_CHANNEL: ${{ needs.semver.outputs.channel }}
SRDRY_VERSION: ${{ needs.semver.outputs.version }}
SR_LAST: ${{ steps.release.outputs.last_release_version }}
SR_PUBLISHED: ${{ steps.release.outputs.new_release_published }}
SR_CHANNEL: ${{ steps.release.outputs.new_release_channel }}
SR_VERSION: ${{ steps.release.outputs.new_release_version }}
SR_MAJOR: ${{ steps.release.outputs.new_release_major_version }}
SR_MINOR: ${{ steps.release.outputs.new_release_minor_version }}
SR_PATCH: ${{ steps.release.outputs.new_release_patch_version }}
SR_NOTES: ${{ steps.release.outputs.new_release_notes }}
run: |
from lib import *
import os
header('semantic-release job outputs')
info('last = {}'.format(os.environ['SR_LAST']))
info('published = {}'.format(os.environ['SR_PUBLISHED']))
info('channel = {}'.format(os.environ['SR_CHANNEL']))
info('version = {}'.format(os.environ['SR_VERSION']))
info('major = {}'.format(os.environ['SR_MAJOR']))
info('minor = {}'.format(os.environ['SR_MINOR']))
info('patch = {}'.format(os.environ['SR_PATCH']))
info('notes ⏎\n{}'.format(os.environ['SR_NOTES']))
header('sanity checks')
action('should be published')
assert_equality((
(os.environ['SR_PUBLISHED'], 'true'),
))
action('consistency with the dry-run')
assert_equality((
(os.environ['SR_CHANNEL'], os.environ['SRDRY_CHANNEL']),
(os.environ['SR_VERSION'], os.environ['SRDRY_VERSION']),
))
header('set the prerelease status')
is_prerelease = '-' in os.environ['SR_VERSION']
info('pre-release = {}'.format(is_prerelease))
output = '{}'.format(str(is_prerelease).lower())
with open(os.environ['GITHUB_OUTPUT'], 'a') as outfile:
print(f'prerelease={output}', file=outfile)
containers:
name: Containers
needs:
- release
runs-on: ubuntu-22.04
env:
HELM_DOCS_VERSION: "1.11.0"
CR_VERSION: "1.4.1"
VERSION: ${{ needs.release.outputs.version }}
PRERELEASE: ${{ needs.release.outputs.prerelease }}
QUAY_IMAGE: "quay.io/enix/kube-image-keeper"
GHCR_IMAGE: "ghcr.io/enix/kube-image-keeper"
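# Builds the final release images and pushes them to Docker Hub, Quay and GHCR ('latest' is skipped for prereleases),
# then packages and publishes the Helm chart with chart-releaser.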
steps:
- name: Configure git
run: |
set -euo pipefail
git config --global user.name '${{ env.COMMIT_NAME }}'
git config --global user.email '${{ env.COMMIT_EMAIL }}'
- name: Checkout Repository
uses: actions/checkout@v4
with:
path: repository
- name: Set up helm
uses: azure/setup-helm@v4
with:
version: '3.9.0'
- name: Helm repository deps
run: |
helm repo add bitnami https://charts.bitnami.com/bitnami
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Cache for chart-releaser
id: cache-cr
uses: actions/cache@v4
with:
path: bin/cr
key: ${{ runner.os }}-cr-${{ env.CR_VERSION }}
- name: Set up chart-releaser
if: steps.cache-cr.outputs.cache-hit != 'true'
run: |
set -euo pipefail
[ -d bin ] || mkdir bin
URL='https://github.com/helm/chart-releaser/releases/download/v${{ env.CR_VERSION }}/chart-releaser_${{ env.CR_VERSION }}_linux_amd64.tar.gz'
curl -sSL "${URL}" | tar xz -C bin cr
- name: Cache for helm-docs
id: cache-helm-docs
uses: actions/cache@v4
with:
path: bin/helm-docs
key: ${{ runner.os }}-helm-docs-${{ env.HELM_DOCS_VERSION }}
- name: Set up helm-docs
if: steps.cache-helm-docs.outputs.cache-hit != 'true'
run: |
set -euo pipefail
[ -d bin ] || mkdir bin
URL='https://github.com/norwoodj/helm-docs/releases/download/v${{ env.HELM_DOCS_VERSION }}/helm-docs_${{ env.HELM_DOCS_VERSION }}_Linux_x86_64.tar.gz'
curl -sSL "${URL}" | tar xz -C bin helm-docs
- name: Login to Docker Hub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Login to GHCR
uses: docker/login-action@v3
with:
registry: ghcr.io
username: ${{ env.COMMIT_NAME }}
password: ${{ secrets.RELEASE_GITHUB_TOKEN }}
- name: Login to Quay.io
uses: docker/login-action@v3
with:
registry: quay.io
username: ${{ secrets.QUAY_USERNAME }}
password: ${{ secrets.QUAY_ROBOT_TOKEN }}
- name: Generate image metadata
id: meta
uses: docker/metadata-action@v5
with:
images: |
${{ github.repository }}
${{ env.QUAY_IMAGE }}
${{ env.GHCR_IMAGE }}
- name: Build container images
uses: docker/build-push-action@v6
with:
context: repository
platforms: linux/amd64,linux/arm64
build-args: |
"VERSION=${{ env.VERSION }}"
"REVISION=${{ github.sha }}"
push: false
labels: ${{ steps.meta.outputs.labels }}
tags: |
${{ github.repository }}:${{ env.VERSION }}
${{ env.QUAY_IMAGE }}:${{ env.VERSION }}
${{ env.GHCR_IMAGE }}:${{ env.VERSION }}
- name: Push container images tag (Latest)
uses: docker/build-push-action@v6
if: ${{ env.PRERELEASE != 'true' }}
with:
context: repository
platforms: linux/amd64,linux/arm64
build-args: |
"VERSION=${{ env.VERSION }}"
"REVISION=${{ github.sha }}"
push: true
labels: ${{ steps.meta.outputs.labels }}
tags: |
${{ github.repository }}:latest
${{ env.QUAY_IMAGE }}:latest
${{ env.GHCR_IMAGE }}:latest
- name: Push container images tag (Release)
uses: docker/build-push-action@v6
with:
context: repository
platforms: linux/amd64,linux/arm64
build-args: |
"VERSION=${{ env.VERSION }}"
"REVISION=${{ github.sha }}"
push: true
labels: ${{ steps.meta.outputs.labels }}
tags: |
${{ github.repository }}:${{ env.VERSION }}
${{ env.QUAY_IMAGE }}:${{ env.VERSION }}
${{ env.GHCR_IMAGE }}:${{ env.VERSION }}
- name: Push alpine container images tag (Release)
uses: docker/build-push-action@v6
with:
context: repository
platforms: linux/amd64,linux/arm64
build-args: |
"VERSION=${{ env.VERSION }}"
"REVISION=${{ github.sha }}"
target: alpine
push: true
labels: ${{ steps.meta.outputs.labels }}
tags: |
${{ github.repository }}:${{ env.VERSION }}-alpine
${{ env.QUAY_IMAGE }}:${{ env.VERSION }}-alpine
${{ env.GHCR_IMAGE }}:${{ env.VERSION }}-alpine
- name: Convert Github changelog for Artifacthub
shell: python
env:
GITHUB_CHANGELOG: ${{ needs.release.outputs.notes }}
run: |
import os, yaml, re
# Based on:
# - https://github.com/conventional-changelog/conventional-changelog/blob/master/packages/conventional-changelog-angular/writer-opts.js
# - https://github.com/artifacthub/hub/blob/master/web/src/layout/package/changelog/Content.tsx
header_to_kind = {
'Features': { 'kind': 'added', 'prefix': '' },
'Bug Fixes': { 'kind': 'fixed', 'prefix': '' },
'Reverts': { 'kind': 'removed', 'prefix': 'revert' },
'Performance Improvements': { 'kind': 'changed', 'prefix': 'perf' },
'BREAKING CHANGES': { 'kind': 'changed', 'prefix': 'BREAKING' },
# sections below won't show up in conventional-changelog output unless they carry 'BREAKING' notes
'Documentation': { 'kind': 'changed', 'prefix': 'docs' },
'Styles': { 'kind': 'changed', 'prefix': 'style' },
'Code Refactoring': { 'kind': 'changed', 'prefix': 'refactor' },
'Tests': { 'kind': 'changed', 'prefix': 'test' },
'Build System': { 'kind': 'changed', 'prefix': 'build' },
'Continuous Integration': { 'kind': 'changed', 'prefix': 'ci' },
}
extract_log = re.compile(
r'\* '
r'(?:\*\*(?P<scope>.+):\*\* )?'
r'(?P<description>.*?)'
r'(?: \(\[[0-9a-f]+\]\((?P<commit>[^)]*)\)\)'
r'(?:, closes (?P<issues>.*))?'
r')?')
extract_issues = re.compile(
r' ?(?:(?:#[0-9]+)|(?:\[#(?P<id>[0-9]+)\]\((?P<url>[^)]*)\)))+')
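# Illustrative example of a release-notes line these regexes are meant to parse (hash, URLs and issue number are made up):
# * **proxy:** handle registry timeouts ([abc1234](https://github.com/enix/kube-image-keeper/commit/abc1234)), closes [#123](https://github.com/enix/kube-image-keeper/issues/123)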
entries = []
mapping = None
for line in os.environ['GITHUB_CHANGELOG'].splitlines():
if line.startswith('### '):
header = line[4:].strip()
mapping = header_to_kind.get(header, None)
continue
if mapping and line.startswith('*'):
match = extract_log.fullmatch(line)
if match is None:
raise ValueError('failed to extract log line: {}'.format(line))
scope = match.group('scope')
if scope == '*':
scope = None
kind = mapping.get('kind')
description = match.group('description')
desc_prefix = mapping.get('prefix')
if desc_prefix:
if scope:
description = '{}({}): {}'.format(desc_prefix, scope, description)
else:
description = '{}: {}'.format(desc_prefix, description)
else:
if scope == 'security':
kind = 'security'
elif scope:
description = '{}: {}'.format(scope, description)
links = []
commit_url = match.group('commit')
if commit_url:
links.append({
'name': 'GitHub commit',
'url': commit_url
})
issues = match.group('issues')
if issues:
for issue in extract_issues.finditer(issues):
links.append({
'name': 'GitHub issue #{}'.format(issue.group('id')),
'url': issue.group('url')
})
entry = {
'kind': kind,
'description': description
}
if len(links):
entry['links'] = links
entries.append(entry)
if len(entries):
output = yaml.dump(entries)
else:
output = ''
print(output)
with open(os.environ['GITHUB_ENV'], 'a') as outfile:
outfile.write('ARTIFACTHUB_CHANGELOG<<EOF\n')
outfile.write(output)
outfile.write('EOF\n')
# TODO
# - OCI releases?
# - signing?
- name: Run chart-releaser
shell: python
env:
WORKSPACE: ${{ github.workspace }}
PYTHONPATH: ${{ github.workspace }}/repository/.github
HELM_DOCS_PATH: ${{ github.workspace }}/bin/helm-docs
CR_PATH: ${{ github.workspace }}/bin/cr
CR_TOKEN: ${{ secrets.CHARTSREPO_GITHUB_TOKEN }}
CHART_NAME: kube-image-keeper
VERSION: ${{ needs.release.outputs.version }}
PRERELEASE: ${{ needs.release.outputs.prerelease }}
ARTIFACTHUB_CHANGELOG: ${{ env.ARTIFACTHUB_CHANGELOG }}
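# The script below patches Chart.yaml with the released version and Artifact Hub annotations,
# then packages the chart and publishes it to the enix/helm-charts repository using chart-releaser.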
run: |
from lib import *
import os, yaml, shutil
chart_path = os.path.join(os.environ['WORKSPACE'], 'repository', 'helm', 'kube-image-keeper')
os.chdir(chart_path)
header('generate chart readme')
run('make', '-C', '../../', 'helm-docs')
run(os.environ['HELM_DOCS_PATH'], '--dry-run')
run(os.environ['HELM_DOCS_PATH'])
header('prepare chart manifest')
version = os.environ['VERSION']
assert_length_above((
(version, 0),
))
is_prerelease = os.environ['PRERELEASE']
is_security_update = 'false' # FIXME
assert_in((
(is_prerelease, ('true', 'false')),
(is_security_update, ('true', 'false')),
))
info('version = {}'.format(version))
info('prerelease = {}'.format(is_prerelease))
info('security fix = {}'.format(is_security_update))
manifest_file = 'Chart.yaml'
manifest = yaml.safe_load(open(manifest_file, 'r'))
manifest.update({
'version': version,
'appVersion': version,
})
manifest['annotations'].update({
'artifacthub.io/prerelease': is_prerelease,
'artifacthub.io/containsSecurityUpdates': is_security_update,
})
changelog = os.environ['ARTIFACTHUB_CHANGELOG']
if len(changelog):
manifest['annotations'].update({
'artifacthub.io/changes': changelog,
})
open(manifest_file, 'w').write(yaml.dump(manifest))
header('inspect files to be released')
for yaml_file in (manifest_file,):
action('YAML: {}'.format(yaml_file))
print(yaml.safe_load(open(yaml_file, 'r')))
header('release the chart')
action('clone helm charts repository')
charts_repo = os.path.join(os.environ['WORKSPACE'], 'enix-charts')
run('git', 'clone', 'https://github.com/enix/helm-charts', charts_repo)
action('copy chart files')
repo_chart_path = os.path.join(charts_repo, 'charts', os.environ['CHART_NAME'])
shutil.copytree(chart_path, repo_chart_path, symlinks=True, dirs_exist_ok=False)
action('create the chart package')
run(os.environ['CR_PATH'], 'package', repo_chart_path, cwd=charts_repo)
action('upload the chart')
run(os.environ['CR_PATH'], 'upload', '--skip-existing', '--owner', 'enix', '--git-repo', 'helm-charts', cwd=charts_repo)
action('update repository index')
run(os.environ['CR_PATH'], 'index', '--push', '-i', 'index.yaml', '--owner', 'enix', '--git-repo', 'helm-charts', cwd=charts_repo)
cleanup:
name: Cleanup
needs:
# - release
- containers
runs-on: ubuntu-22.04
steps:
- name: Delete assets artifact
uses: geekyeggo/delete-artifact@v5
with:
name: binaries