++gitea updates

Nico Schottelius 2021-06-27 19:23:49 +02:00
parent a4e3ef1431
commit 379addec5e
11 changed files with 177 additions and 349 deletions


@@ -1,6 +1,6 @@
 apiVersion: v2
 name: gitea
-description: A Helm chart for Kubernetes
+description: ungleich managed gitea

 # A chart can be either an 'application' or a 'library' chart.
 #
@@ -21,4 +21,4 @@ version: 0.1.0
 # incremented each time you make changes to the application. Versions are not expected to
 # follow Semantic Versioning. They should reflect the version the application is using.
 # It is recommended to use it with quotes.
-appVersion: "1.16.0"
+appVersion: "1.14"


@@ -1,22 +0,0 @@
1. Get the application URL by running these commands:
{{- if .Values.ingress.enabled }}
{{- range $host := .Values.ingress.hosts }}
{{- range .paths }}
http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ .path }}
{{- end }}
{{- end }}
{{- else if contains "NodePort" .Values.service.type }}
export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "gitea.fullname" . }})
export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
echo http://$NODE_IP:$NODE_PORT
{{- else if contains "LoadBalancer" .Values.service.type }}
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
You can watch the status of by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "gitea.fullname" . }}'
export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "gitea.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
echo http://$SERVICE_IP:{{ .Values.service.port }}
{{- else if contains "ClusterIP" .Values.service.type }}
export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "gitea.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}")
echo "Visit http://127.0.0.1:8080 to use your application"
kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:$CONTAINER_PORT
{{- end }}


@@ -1,62 +0,0 @@
{{/*
Expand the name of the chart.
*/}}
{{- define "gitea.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "gitea.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "gitea.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Common labels
*/}}
{{- define "gitea.labels" -}}
helm.sh/chart: {{ include "gitea.chart" . }}
{{ include "gitea.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}
{{/*
Selector labels
*/}}
{{- define "gitea.selectorLabels" -}}
app.kubernetes.io/name: {{ include "gitea.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
{{/*
Create the name of the service account to use
*/}}
{{- define "gitea.serviceAccountName" -}}
{{- if .Values.serviceAccount.create }}
{{- default (include "gitea.fullname" .) .Values.serviceAccount.name }}
{{- else }}
{{- default "default" .Values.serviceAccount.name }}
{{- end }}
{{- end }}


@@ -1,61 +1,171 @@
+---
 apiVersion: apps/v1
 kind: Deployment
 metadata:
-  name: {{ include "gitea.fullname" . }}
-  labels:
-    {{- include "gitea.labels" . | nindent 4 }}
+  name: {{ .Release.Name }}-gitea
 spec:
-  {{- if not .Values.autoscaling.enabled }}
-  replicas: {{ .Values.replicaCount }}
-  {{- end }}
   selector:
     matchLabels:
-      {{- include "gitea.selectorLabels" . | nindent 6 }}
+      app: {{ .Release.Name }}-gitea
+  replicas: 1
   template:
     metadata:
-      {{- with .Values.podAnnotations }}
-      annotations:
-        {{- toYaml . | nindent 8 }}
-      {{- end }}
       labels:
-        {{- include "gitea.selectorLabels" . | nindent 8 }}
+        app: {{ .Release.Name }}-gitea
+        use-as-service: {{ .Release.Name }}
     spec:
-      {{- with .Values.imagePullSecrets }}
-      imagePullSecrets:
-        {{- toYaml . | nindent 8 }}
-      {{- end }}
-      serviceAccountName: {{ include "gitea.serviceAccountName" . }}
-      securityContext:
-        {{- toYaml .Values.podSecurityContext | nindent 8 }}
+      initContainers:
+      - name: wait-for-cert
+        image: busybox
+        command:
+        - sh
+        - -c
+        - until ls /etc/letsencrypt/live/{{ tpl .Values.fqdn . }}/fullchain.pem; do sleep 5; done
+        volumeMounts:
+        - name: etcletsencrypt
+          mountPath: "/etc/letsencrypt"
       containers:
-        - name: {{ .Chart.Name }}
-          securityContext:
-            {{- toYaml .Values.securityContext | nindent 12 }}
-          image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
-          imagePullPolicy: {{ .Values.image.pullPolicy }}
-          ports:
-            - name: http
-              containerPort: 80
-              protocol: TCP
-          livenessProbe:
-            httpGet:
-              path: /
-              port: http
-          readinessProbe:
-            httpGet:
-              path: /
-              port: http
-          resources:
-            {{- toYaml .Values.resources | nindent 12 }}
-      {{- with .Values.nodeSelector }}
-      nodeSelector:
-        {{- toYaml . | nindent 8 }}
-      {{- end }}
-      {{- with .Values.affinity }}
-      affinity:
-        {{- toYaml . | nindent 8 }}
-      {{- end }}
-      {{- with .Values.tolerations }}
-      tolerations:
-        {{- toYaml . | nindent 8 }}
-      {{- end }}
+      # This container will only start *after* the cert has been placed
+      - name: nginx
+        image: nginx:1.21-alpine
+        ports:
+        - containerPort: 443
+        volumeMounts:
+        - name: nginx-config
+          mountPath: "/etc/nginx/conf.d/"
+        - name: etcletsencrypt
+          mountPath: "/etc/letsencrypt"
+      - name: gitea
+        image: gitea/gitea:1.14
+        ports:
+        - containerPort: 22
+        - containerPort: 3000
+        env:
+        - name: USER_UID
+          value: "1000"
+        - name: USER_GID
+          value: "1000"
+        volumeMounts:
+        - name: etcletsencrypt
+          mountPath: "/etc/letsencrypt"
+        - name: data
+          mountPath: "/data"
+      volumes:
+      - name: etcletsencrypt
+        persistentVolumeClaim:
+          claimName: {{ tpl .Values.identifier . }}-letsencrypt-certs
+      - name: data
+        persistentVolumeClaim:
+          claimName: {{ tpl .Values.identifier . }}-data
+      - name: nginx-config
+        configMap:
+          name: {{ tpl .Values.identifier . }}-nginx-config
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: {{ tpl .Values.identifier . }}
+  labels:
+    app: {{ tpl .Values.identifier . }}
+spec:
+  type: ClusterIP
+  ports:
+  - port: 22
+    name: ssh
+  # Required for letsencrypt
+  - port: 80
+    name: http
+  - port: 443
+    name: https
+  selector:
+    use-as-service: {{ .Release.Name }}
+# ---
+# apiVersion: v1
+# kind: ConfigMap
+# metadata:
+#   name: {{ tpl .Values.identifier . }}-giteaconfig
+# data:
+# {{ tpl (.Files.Glob "gitea/*").AsConfig . | indent 2 }}
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: {{ tpl .Values.identifier . }}-letsencrypt-certs
+spec:
+  accessModes:
+    - ReadWriteMany
+  resources:
+    requests:
+      storage: 50Mi
+  storageClassName: rook-cephfs
+---
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: {{ tpl .Values.identifier . }}-data
+spec:
+  accessModes:
+    - ReadWriteMany
+  resources:
+    requests:
+      storage: 100Mi
+  storageClassName: rook-cephfs
+---
+apiVersion: batch/v1
+kind: Job
+metadata:
+  name: {{ tpl .Values.identifier . }}-getcert
+spec:
+  template:
+    metadata:
+      labels:
+        app: certbot-letsencrypt-getcert
+        use-as-service: {{ .Release.Name }}
+    spec:
+      restartPolicy: Never
+      containers:
+      - name: certbot
+        image: ungleich/ungleich-certbot
+        ports:
+        - containerPort: 80
+        env:
+        - name: DOMAIN
+          value: "{{ tpl .Values.fqdn . }}"
+        - name: EMAIL
+          value: "{{ .Values.email }}"
+        {{ if eq .Values.letsencryptStaging "no" }}
+        - name: STAGING
+          value: "no"
+        {{ end }}
+        volumeMounts:
+        - name: etcletsencrypt
+          mountPath: "/etc/letsencrypt"
+      volumes:
+      - name: etcletsencrypt
+        persistentVolumeClaim:
+          claimName: {{ tpl .Values.identifier . }}-letsencrypt-certs
+  backoffLimit: 3
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: {{ .Release.Name }}-nginx-config
+data:
+  default.conf: |
+    server {
+        listen 443 ssl;
+        listen [::]:443 ssl;
+        server_name {{ .Release.Name }}-{{ .Values.serviceName }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }};
+        ssl_certificate /etc/letsencrypt/live/{{ tpl .Values.fqdn . }}/fullchain.pem;
+        ssl_certificate_key /etc/letsencrypt/live/{{ tpl .Values.fqdn . }}/privkey.pem;
+        client_max_body_size 256m;
+        location / {
+            proxy_pass http://localhost:3000;
+        }
+    }
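
The rewritten deployment drops the scaffold's Ingress and probes in favour of an in-pod TLS path: the -getcert Job obtains a certificate into the shared letsencrypt PVC, the wait-for-cert init container blocks until fullchain.pem exists, and nginx then terminates TLS on 443 and proxies to gitea on port 3000. A rough smoke test of that ordering could look like this (release name "gitea1", namespace "gitea" and the timeout are placeholders, not part of the commit):

    RELEASE=gitea1
    NS=gitea
    # the certbot Job must complete before the init container lets the pod start
    kubectl -n "$NS" wait --for=condition=complete --timeout=600s job/$RELEASE-getcert
    kubectl -n "$NS" logs job/$RELEASE-getcert
    # afterwards the deployment should become ready
    kubectl -n "$NS" rollout status deployment/$RELEASE-gitea
    kubectl -n "$NS" get pods -l app=$RELEASE-gitea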


@@ -1,28 +0,0 @@
{{- if .Values.autoscaling.enabled }}
apiVersion: autoscaling/v2beta1
kind: HorizontalPodAutoscaler
metadata:
name: {{ include "gitea.fullname" . }}
labels:
{{- include "gitea.labels" . | nindent 4 }}
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: {{ include "gitea.fullname" . }}
minReplicas: {{ .Values.autoscaling.minReplicas }}
maxReplicas: {{ .Values.autoscaling.maxReplicas }}
metrics:
{{- if .Values.autoscaling.targetCPUUtilizationPercentage }}
- type: Resource
resource:
name: cpu
targetAverageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }}
{{- end }}
{{- if .Values.autoscaling.targetMemoryUtilizationPercentage }}
- type: Resource
resource:
name: memory
targetAverageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }}
{{- end }}
{{- end }}


@@ -1,61 +0,0 @@
{{- if .Values.ingress.enabled -}}
{{- $fullName := include "gitea.fullname" . -}}
{{- $svcPort := .Values.service.port -}}
{{- if and .Values.ingress.className (not (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion)) }}
{{- if not (hasKey .Values.ingress.annotations "kubernetes.io/ingress.class") }}
{{- $_ := set .Values.ingress.annotations "kubernetes.io/ingress.class" .Values.ingress.className}}
{{- end }}
{{- end }}
{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}}
apiVersion: networking.k8s.io/v1
{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}}
apiVersion: networking.k8s.io/v1beta1
{{- else -}}
apiVersion: extensions/v1beta1
{{- end }}
kind: Ingress
metadata:
name: {{ $fullName }}
labels:
{{- include "gitea.labels" . | nindent 4 }}
{{- with .Values.ingress.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
{{- if and .Values.ingress.className (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }}
ingressClassName: {{ .Values.ingress.className }}
{{- end }}
{{- if .Values.ingress.tls }}
tls:
{{- range .Values.ingress.tls }}
- hosts:
{{- range .hosts }}
- {{ . | quote }}
{{- end }}
secretName: {{ .secretName }}
{{- end }}
{{- end }}
rules:
{{- range .Values.ingress.hosts }}
- host: {{ .host | quote }}
http:
paths:
{{- range .paths }}
- path: {{ .path }}
{{- if and .pathType (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }}
pathType: {{ .pathType }}
{{- end }}
backend:
{{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }}
service:
name: {{ $fullName }}
port:
number: {{ $svcPort }}
{{- else }}
serviceName: {{ $fullName }}
servicePort: {{ $svcPort }}
{{- end }}
{{- end }}
{{- end }}
{{- end }}


@@ -1,15 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: {{ include "gitea.fullname" . }}
labels:
{{- include "gitea.labels" . | nindent 4 }}
spec:
type: {{ .Values.service.type }}
ports:
- port: {{ .Values.service.port }}
targetPort: http
protocol: TCP
name: http
selector:
{{- include "gitea.selectorLabels" . | nindent 4 }}


@@ -1,12 +0,0 @@
{{- if .Values.serviceAccount.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "gitea.serviceAccountName" . }}
labels:
{{- include "gitea.labels" . | nindent 4 }}
{{- with .Values.serviceAccount.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- end }}


@@ -1,9 +1,7 @@
 apiVersion: v1
 kind: Pod
 metadata:
-  name: "{{ include "gitea.fullname" . }}-test-connection"
-  labels:
-    {{- include "gitea.labels" . | nindent 4 }}
+  name: "{{ tpl .Values.identifier . }}-test-connection"
   annotations:
     "helm.sh/hook": test
 spec:
@@ -11,5 +9,5 @@ spec:
     - name: wget
       image: busybox
       command: ['wget']
-      args: ['{{ include "gitea.fullname" . }}:{{ .Values.service.port }}']
+      args: ['{{ tpl .Values.fqdn . }}']
   restartPolicy: Never
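
With the scaffold helpers gone, the test pod is named from .Values.identifier and now wgets the rendered fqdn value instead of the old fullname/service.port pair. A hedged way to exercise it (release and namespace names are placeholders):

    helm test gitea1 --namespace gitea
    # the hook pod is not auto-deleted, so its output can be inspected afterwards
    kubectl -n gitea logs gitea1-test-connection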


@@ -1,82 +1,8 @@
-# Default values for gitea.
-# This is a YAML-formatted file.
-# Declare variables to be passed into your templates.
-replicaCount: 1
-image:
-  repository: nginx
-  pullPolicy: IfNotPresent
-  # Overrides the image tag whose default is the chart appVersion.
-  tag: ""
-imagePullSecrets: []
-nameOverride: ""
-fullnameOverride: ""
-serviceAccount:
-  # Specifies whether a service account should be created
-  create: true
-  # Annotations to add to the service account
-  annotations: {}
-  # The name of the service account to use.
-  # If not set and create is true, a name is generated using the fullname template
-  name: ""
-podAnnotations: {}
-podSecurityContext: {}
-  # fsGroup: 2000
-securityContext: {}
-  # capabilities:
-  #   drop:
-  #   - ALL
-  # readOnlyRootFilesystem: true
-  # runAsNonRoot: true
-  # runAsUser: 1000
-service:
-  type: ClusterIP
-  port: 80
-ingress:
-  enabled: false
-  className: ""
-  annotations: {}
-    # kubernetes.io/ingress.class: nginx
-    # kubernetes.io/tls-acme: "true"
-  hosts:
-    - host: chart-example.local
-      paths:
-        - path: /
-          pathType: ImplementationSpecific
-  tls: []
-  #  - secretName: chart-example-tls
-  #    hosts:
-  #      - chart-example.local
-resources: {}
-  # We usually recommend not to specify default resources and to leave this as a conscious
-  # choice for the user. This also increases chances charts run on environments with little
-  # resources, such as Minikube. If you do want to specify resources, uncomment the following
-  # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
-  # limits:
-  #   cpu: 100m
-  #   memory: 128Mi
-  # requests:
-  #   cpu: 100m
-  #   memory: 128Mi
-autoscaling:
-  enabled: false
-  minReplicas: 1
-  maxReplicas: 100
-  targetCPUUtilizationPercentage: 80
-  # targetMemoryUtilizationPercentage: 80
-nodeSelector: {}
-tolerations: []
-affinity: {}
+clusterDomain: c2.k8s.ooo
+email: technik@ungleich.ch
+letsencryptStaging: "yes"
+
+# This is how the service and the data volumes are named - i.e. the
+# persistent thing
+identifier: "{{ .Release.Name }}"
+fqdn: "{{ .Release.Name }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}"
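
values.yaml shrinks to the handful of settings the new templates actually read; identifier and fqdn are passed through tpl, so their defaults can reference .Release and other values at render time. A sketch of an install with these values (the ./gitea chart path, release name and namespace are assumptions, not part of the commit):

    helm install gitea1 ./gitea \
        --namespace gitea --create-namespace \
        --set-string letsencryptStaging=no \
        --set email=technik@ungleich.ch
    # render locally to see how identifier and fqdn expand
    helm template gitea1 ./gitea | grep -B2 -A8 'kind: Service'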


@@ -24,22 +24,18 @@ defaults
     timeout http-keep-alive 10s
     timeout check 10s
-frontend http
-    bind :80
+frontend f_http
+    bind ipv6@:80
     mode http
-    option httplog
     http-request do-resolve(txn.myip,mydns,ipv6) hdr(Host),lower
-    # http-request capture var(txn.myip) len 255
-    use_backend b_503 unless { var(txn.myip) -m found }
-    default_backend http
+    # if DNS resolving did not work
+    # use_backend b_503 unless { var(txn.myip) -m found }
+    default_backend b_http
-# dummy backend
-backend b_503
-    mode http
-backend http
+backend b_http
     mode http
     http-request deny unless { hdr(host) -i c2.k8s.ooo }
     http-request set-dst var(txn.myip)
@@ -47,9 +43,8 @@ backend http
 # # HTTPs
 frontend f_https
-    bind :443
+    bind ipv6@:443
     mode tcp
-    option tcplog
     tcp-request inspect-delay 5s
     tcp-request content accept if { req_ssl_hello_type 1 }
@@ -64,6 +59,5 @@ backend b_https
     tcp-request content set-dst var(txn.myip)
     server tcp_https ipv6@*
-    # tcp-request capture var(txn.myip) len 255
     # tcp-request connection deny unless { hdr(host) -i c2.k8s.ooo }
     # use_backend b_503 unless { var(txn.myip) -m found }
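
On the haproxy side, the frontends and backends gain f_/b_ prefixes and now bind via ipv6@, while HTTP routing still comes from do-resolve on the Host header and HTTPS is passed through based on the TLS client hello. Two quick, non-invasive checks (the config path and hostnames below are placeholders, not taken from this commit):

    # syntax-check the renamed sections
    haproxy -c -f /etc/haproxy/haproxy.cfg
    # probe the https path with an explicit SNI name
    openssl s_client -connect proxy.example.com:443 -servername gitea1.gitea.svc.c2.k8s.ooo </dev/null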