Subsections of Application
Install Act Runner
Installation
Preliminary
1. Kubernetes has installed, if not check 🔗link2. Helm binary has installed, if not check 🔗link3. CertManager has installed, if not check 🔗link4. Ingress has installed, if not check 🔗link1.get helm repo
Details
# Add the upstream Helm repository hosting the act-runner chart, then refresh the local index.
helm repo add gringolito https://gringolito.github.io/helm-charts
helm repo update
2.install chart
Details
helm install act-runner gringolito/act-runner --generate-name
Using Mirror
# Alternative: install act-runner 0.2.0 from the mirror repository;
# --generate-name lets Helm pick a unique release name.
helm repo add ay-helm-mirror https://aaronyang0628.github.io/helm-chart-mirror/charts \
&& helm install ay-helm-mirror/act-runner --generate-name --version 0.2.0
for more information, you can check 🔗https://aaronyang0628.github.io/helm-chart-mirror/
Preliminary
1. Kubernetes has installed, if not check 🔗link2. ArgoCD has installed, if not check 🔗link3. Helm binary has installed, if not check 🔗link1.prepare `act-runner-secret`
Details
# Create the registration-token secret the act-runner chart references
# (secret "act-runner-secret", key "act-runner-token") in namespace "application".
# NOTE(review): the token below is a sample value — replace it with your own
# Gitea runner registration token instead of committing a real one.
kubectl -n application create secret generic act-runner-secret \
--from-literal=act-runner-token=4w3Sx0Hwe6VFevl473ZZ4nFVDvFvhKcEUBvpJ09L
2.prepare
act-runner.yaml
# Create/refresh the act-runner ArgoCD Application.
# NOTE(review): the original snippet had lost all YAML indentation (invalid as
# written); the nesting below is reconstructed from the key order and the
# standard ArgoCD Application / act-runner chart layout — verify against the
# chart's values schema.
kubectl -n argocd apply -f - <<EOF
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: act-runner
spec:
  syncPolicy:
    syncOptions:
      - CreateNamespace=true
  project: default
  source:
    repoURL: https://gringolito.github.io/helm-charts
    chart: act-runner
    targetRevision: 0.2.0
    helm:
      releaseName: act-runner
      values: |
        image:
          name: gitea/act_runner
          tag: "0.2.13"
          repository: m.daocloud.io/docker.io
        podAnnotations:
          # Lets the dind sidecar run unconfined under AppArmor.
          container.apparmor.security.beta.kubernetes.io/dind: unconfined
        runner:
          instanceURL: http://10.200.60.64:30300 # https://gitea.ay.dev:32443
          token:
            fromSecret:
              name: "act-runner-secret"
              key: "act-runner-token"
        dockerDind:
          enabled: true
          image: docker:23.0.6-dind
        config:
          enabled: true
          data: |
            log:
              level: info
            runner:
              labels:
                - ubuntu-latest:docker://docker.gitea.com/runner-images:ubuntu-latest
            container:
              force_pull: true
        persistence:
          enabled: true
          storageClassName: ""
          # NOTE(review): some charts expect a list here (e.g. [ReadWriteOnce]);
          # confirm against the chart's schema.
          accessModes: ReadWriteOnce
          size: 1Gi
        autoscaling:
          enabled: false
          minReplicas: 1
          maxReplicas: 100
        replicas: 1
        securityContext:
          privileged: true
          runAsUser: 0
          runAsGroup: 0
          fsGroup: 0
          capabilities:
            add: ["NET_ADMIN", "SYS_ADMIN"]
        podSecurityContext:
          runAsUser: 0
          runAsGroup: 0
          fsGroup: 0
        resources:
          requests:
            cpu: 200m
            memory: 512Mi
          limits:
            cpu: 1000m
            memory: 2048Mi
  destination:
    server: https://kubernetes.default.svc
    namespace: application
EOF
# act-runner ArgoCD Application (mirror-registry variant): runner images are
# pulled through m.daocloud.io instead of docker.gitea.com.
# NOTE(review): the original had lost all YAML indentation; nesting below is
# reconstructed from the key order — verify against the chart's values schema.
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: act-runner
spec:
  syncPolicy:
    syncOptions:
      - CreateNamespace=true
  project: default
  source:
    repoURL: https://gringolito.github.io/helm-charts
    chart: act-runner
    targetRevision: 0.2.0
    helm:
      releaseName: act-runner
      values: |
        image:
          name: gitea/act_runner
          tag: "0.2.13"
          repository: m.daocloud.io/docker.io
        runner:
          instanceURL: http://10.200.60.64:30300 # https://gitea.ay.dev:32443
          token:
            fromSecret:
              name: "act-runner-secret"
              key: "act-runner-token"
        config:
          enabled: true
          data: |
            log:
              level: info
            runner:
              labels:
                - ubuntu-latest:docker://m.daocloud.io/docker.io/gitea/runner-images:ubuntu-latest
                - ubuntu-22.04:docker://m.daocloud.io/docker.io/gitea/runner-images:ubuntu-22.04
                - ubuntu-20.04:docker://m.daocloud.io/docker.io/gitea/runner-images:ubuntu-20.04
            container:
              force_pull: true
        persistence:
          enabled: true
          storageClassName: ""
          # NOTE(review): some charts expect a list here; confirm the schema.
          accessModes: ReadWriteOnce
          size: 1Gi
        autoscaling:
          enabled: false
          minReplicas: 1
          maxReplicas: 100
        replicas: 1
        securityContext:
          privileged: true
          runAsUser: 0
          runAsGroup: 0
          fsGroup: 0
          capabilities:
            add: ["NET_ADMIN", "SYS_ADMIN"]
        podSecurityContext:
          runAsUser: 0
          runAsGroup: 0
          fsGroup: 0
        resources:
          requests:
            cpu: 200m
            memory: 512Mi
          limits:
            cpu: 1000m
            memory: 2048Mi
  destination:
    server: https://kubernetes.default.svc
    namespace: application
4.sync by argocd
Details
argocd app sync argocd/act-runner
5.use action
Preliminary
1. Docker 2. Podman has installed, and the `podman` command is available in your PATH.1.prepare data and config dir
Details
# Create host directories for the runner's data/config, owned by UID/GID 1000
# (matching the non-root user used by the container), world-readable.
mkdir -p /opt/gitea_act_runner/{data,config} \
&& chown -R 1000:1000 /opt/gitea_act_runner \
&& chmod -R 755 /opt/gitea_act_runner
2.run container
Details
# Run the rootless DinD act_runner image with Podman. --privileged and host
# networking are used for Docker-in-Docker job execution; the host Podman
# socket is mounted at the Docker socket path the runner expects.
# NOTE(review): the registration token below is a sample — replace with your own.
podman run -it \
--name gitea_act_runner \
--rm \
--privileged \
--network=host \
-v /opt/gitea_act_runner/data:/data \
-v /opt/gitea_act_runner/config:/config \
-v /var/run/podman/podman.sock:/var/run/docker.sock \
-e GITEA_INSTANCE_URL="http://10.200.60.64:30300" \
-e GITEA_RUNNER_REGISTRATION_TOKEN="5lgsrOzfKz3RiqeMWxxUb9RmUPEWNnZ6hTTZV0DL" \
m.daocloud.io/docker.io/gitea/act_runner:latest-dind-rootless
Using Mirror
# Mirror-based chart install (same as the earlier "Using Mirror" snippet).
helm repo add ay-helm-mirror https://aaronyang0628.github.io/helm-chart-mirror/charts \
&& helm install ay-helm-mirror/act-runner --generate-name --version 0.2.0
for more information, you can check 🔗https://aaronyang0628.github.io/helm-chart-mirror/
Preliminary
1. Docker 2. Podman has installed, and the `podman` command is available in your PATH.1.prepare data and config dir
Details
# Same host-directory preparation as the Podman variant above.
mkdir -p /opt/gitea_act_runner/{data,config} \
&& chown -R 1000:1000 /opt/gitea_act_runner \
&& chmod -R 755 /opt/gitea_act_runner
2.run container
Details
# Run the DinD act_runner image with Docker (no host socket mount — the image
# brings its own Docker daemon). --privileged is required for DinD.
# NOTE(review): the registration token below is a sample — replace with your own.
docker run -it \
--name gitea_act_runner \
--rm \
--privileged \
--network=host \
-v /opt/gitea_act_runner/data:/data \
-v /opt/gitea_act_runner/config:/config \
-e GITEA_INSTANCE_URL="http://192.168.100.125:30300" \
-e GITEA_RUNNER_REGISTRATION_TOKEN="5lgsrOzfKz3RiqeMWxxUb9RmUPEWNnZ6hTTZV0DL" \
m.daocloud.io/docker.io/gitea/act_runner:latest-dind
Using Mirror
# Mirror-based chart install (duplicate of the earlier "Using Mirror" snippet).
helm repo add ay-helm-mirror https://aaronyang0628.github.io/helm-chart-mirror/charts \
&& helm install ay-helm-mirror/act-runner --generate-name --version 0.2.0
for more information, you can check 🔗https://aaronyang0628.github.io/helm-chart-mirror/
FAQ
Install Chart Museum
Installation
Preliminary
1. Kubernetes has installed, if not check 🔗link2. Helm binary has installed, if not check 🔗link1.get helm repo
Details
# Add the mirror Helm repository, then refresh the local index.
helm repo add ay-helm-mirror https://aaronyang0628.github.io/helm-chart-mirror/charts
helm repo update
2.install chart
Details
helm install ay-helm-mirror/kube-prometheus-stack --generate-name
Using Mirror
# NOTE(review): this "Using Mirror" example installs cert-manager, not
# ChartMuseum — it looks like boilerplate copied from another page; confirm
# the intended chart/version before following it.
helm repo add ay-helm-mirror https://aaronyang0628.github.io/helm-chart-mirror/charts \
&& helm install ay-helm-mirror/cert-manager --generate-name --version 1.17.2
for more information, you can check 🔗https://aaronyang0628.github.io/helm-chart-mirror/
Preliminary
1. Kubernetes has installed, if not check 🔗link2. ArgoCD has installed, if not check 🔗link3. Helm binary has installed, if not check 🔗link4. Ingress has installed on argoCD, if not check 🔗link5. Minio has installed, if not check 🔗link1.prepare `chart-museum-credentials`
# Ensure the target namespace exists (idempotent), then create basic-auth
# credentials for ChartMuseum with a random 16-character alphanumeric password.
kubectl get namespaces basic-components > /dev/null 2>&1 || kubectl create namespace basic-components
kubectl -n basic-components create secret generic chart-museum-credentials \
--from-literal=username=admin \
--from-literal=password=$(tr -dc A-Za-z0-9 </dev/urandom | head -c 16)
# S3-backed variant: same basic-auth credentials, plus AWS-style access keys
# copied from the MinIO root credentials stored in the "storage" namespace.
kubectl get namespaces basic-components > /dev/null 2>&1 || kubectl create namespace basic-components
kubectl -n basic-components create secret generic chart-museum-credentials \
--from-literal=username=admin \
--from-literal=password=$(tr -dc A-Za-z0-9 </dev/urandom | head -c 16) \
--from-literal=aws_access_key_id=$(kubectl -n storage get secret minio-credentials -o jsonpath='{.data.rootUser}' | base64 -d) \
--from-literal=aws_secret_access_key=$(kubectl -n storage get secret minio-credentials -o jsonpath='{.data.rootPassword}' | base64 -d)
2.prepare `chart-museum.yaml`
# Create/refresh the ChartMuseum ArgoCD Application (local filesystem storage).
# The heredoc delimiter is quoted ('EOF') so the shell does NOT expand "$1"
# inside the rewrite-target annotation before kubectl sees it (the original
# unquoted delimiter silently replaced $1 with an empty string).
# NOTE(review): the original snippet had lost all YAML indentation; nesting is
# reconstructed from the key order and the chartmuseum chart's values layout.
kubectl apply -f - << 'EOF'
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: chart-museum
spec:
  syncPolicy:
    syncOptions:
      - CreateNamespace=true
  project: default
  source:
    repoURL: https://chartmuseum.github.io/charts
    chart: chartmuseum
    targetRevision: 3.10.3
    helm:
      releaseName: chart-museum
      values: |
        replicaCount: 1
        image:
          repository: m.daocloud.io/ghcr.io/helm/chartmuseum
        env:
          open:
            DISABLE_API: false
            STORAGE: local
            AUTH_ANONYMOUS_GET: true
          existingSecret: "chart-museum-credentials"
          existingSecretMappings:
            BASIC_AUTH_USER: "username"
            BASIC_AUTH_PASS: "password"
        persistence:
          enabled: false
          storageClass: ""
        volumePermissions:
          image:
            registry: m.daocloud.io/docker.io
        ingress:
          enabled: true
          ingressClassName: nginx
          annotations:
            cert-manager.io/cluster-issuer: self-signed-ca-issuer
            nginx.ingress.kubernetes.io/rewrite-target: /$1
          hosts:
            - name: chartmuseum.ay.dev
              path: /?(.*)
              tls: true
              tlsSecret: chartmuseum.ay.dev-tls
  destination:
    server: https://kubernetes.default.svc
    namespace: basic-components
EOF
# Create/refresh the ChartMuseum ArgoCD Application (MinIO/S3 storage backend).
# The heredoc delimiter is quoted ('EOF') so the shell does NOT expand "$1"
# inside the rewrite-target annotation before kubectl sees it.
# NOTE(review): the original snippet had lost all YAML indentation; nesting is
# reconstructed from the key order and the chartmuseum chart's values layout.
kubectl apply -f - << 'EOF'
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: chart-museum
spec:
  syncPolicy:
    syncOptions:
      - CreateNamespace=true
  project: default
  source:
    repoURL: https://chartmuseum.github.io/charts
    chart: chartmuseum
    targetRevision: 3.10.3
    helm:
      releaseName: chart-museum
      values: |
        replicaCount: 1
        image:
          repository: m.daocloud.io/ghcr.io/helm/chartmuseum
        env:
          open:
            DISABLE_API: false
            STORAGE: amazon
            STORAGE_AMAZON_ENDPOINT: http://minio-api.ay.dev:32080
            STORAGE_AMAZON_BUCKET: chart-museum
            STORAGE_AMAZON_PREFIX: charts
            STORAGE_AMAZON_REGION: us-east-1
            AUTH_ANONYMOUS_GET: true
          existingSecret: "chart-museum-credentials"
          existingSecretMappings:
            BASIC_AUTH_USER: "username"
            BASIC_AUTH_PASS: "password"
            AWS_ACCESS_KEY_ID: "aws_access_key_id"
            AWS_SECRET_ACCESS_KEY: "aws_secret_access_key"
        persistence:
          enabled: false
          storageClass: ""
        volumePermissions:
          image:
            registry: m.daocloud.io/docker.io
        ingress:
          enabled: true
          ingressClassName: nginx
          annotations:
            cert-manager.io/cluster-issuer: self-signed-ca-issuer
            nginx.ingress.kubernetes.io/rewrite-target: /$1
          hosts:
            - name: chartmuseum.ay.dev
              path: /?(.*)
              tls: true
              tlsSecret: chartmuseum.ay.dev-tls
  destination:
    server: https://kubernetes.default.svc
    namespace: basic-components
EOF
# ChartMuseum ArgoCD Application (local-storage variant, as a standalone
# manifest rather than a heredoc).
# NOTE(review): the original had lost all YAML indentation; nesting below is
# reconstructed from the key order and the chartmuseum chart's values layout.
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: chart-museum
spec:
  syncPolicy:
    syncOptions:
      - CreateNamespace=true
  project: default
  source:
    repoURL: https://chartmuseum.github.io/charts
    chart: chartmuseum
    targetRevision: 3.10.3
    helm:
      releaseName: chart-museum
      values: |
        replicaCount: 1
        image:
          repository: m.daocloud.io/ghcr.io/helm/chartmuseum
        env:
          open:
            DISABLE_API: false
            STORAGE: local
            AUTH_ANONYMOUS_GET: true
          existingSecret: "chart-museum-credentials"
          existingSecretMappings:
            BASIC_AUTH_USER: "username"
            BASIC_AUTH_PASS: "password"
        persistence:
          enabled: false
          storageClass: ""
        volumePermissions:
          image:
            registry: m.daocloud.io/docker.io
        ingress:
          enabled: true
          ingressClassName: nginx
          annotations:
            cert-manager.io/cluster-issuer: self-signed-ca-issuer
            nginx.ingress.kubernetes.io/rewrite-target: /$1
          hosts:
            - name: chartmuseum.ay.dev
              path: /?(.*)
              tls: true
              tlsSecret: chartmuseum.ay.dev-tls
  destination:
    server: https://kubernetes.default.svc
    namespace: basic-components
3.sync by argocd
Details
argocd app sync argocd/chart-museum
Uploading a Chart Package
Follow “How to Run” section below to get ChartMuseum up and running at http://localhost:8080
First create mychart-0.1.0.tgz
using the Helm CLI:
cd mychart/
helm package .
Upload mychart-0.1.0.tgz
:
curl --data-binary "@mychart-0.1.0.tgz" http://localhost:8080/api/charts
If you’ve signed your package and generated a provenance file, upload it with:
curl --data-binary "@mychart-0.1.0.tgz.prov" http://localhost:8080/api/prov
Both files can also be uploaded at once (or one at a time) on the /api/charts
route using the multipart/form-data
format:
curl -F "chart=@mychart-0.1.0.tgz" -F "prov=@mychart-0.1.0.tgz.prov" http://localhost:8080/api/charts
You can also use the helm-push plugin:
helm cm-push mychart/ chartmuseum
Installing Charts into Kubernetes
Add the URL to your ChartMuseum installation to the local repository list:
helm repo add chartmuseum http://localhost:8080
Search for charts:
helm search repo chartmuseum/
Install chart:
helm install chartmuseum/mychart --generate-name
FAQ
Install Flink Operator
Installation
Preliminary
1. Kubernetes has installed, if not check 🔗link2. Helm has installed, if not check 🔗link3. Cert-manager has installed, if not check 🔗link1.get helm repo
Details
# Add the Apache Flink Kubernetes operator chart repo (URL is pinned to the
# 1.11.0 release), then refresh the local index.
helm repo add flink-operator-repo https://downloads.apache.org/flink/flink-kubernetes-operator-1.11.0/
helm repo update
latest version : 🔗https://flink.apache.org/downloads/#apache-flink-kubernetes-operator
2.install chart
Details
helm install --create-namespace -n flink flink-kubernetes-operator flink-operator-repo/flink-kubernetes-operator --set image.repository=m.lab.zverse.space/ghcr.io/apache/flink-kubernetes-operator --set image.tag=1.11.0 --set webhook.create=false
Reference
for more information, you can check 🔗https://nightlies.apache.org/flink/flink-kubernetes-operator-docs-main/zh/docs/try-flink-kubernetes-operator/quick-start/
Preliminary
1. Kubernetes has installed, if not check 🔗link2. ArgoCD has installed, if not check 🔗link3. Cert-manager has installed on argocd and the clusterissuer has a named self-signed-ca-issuer service , if not check 🔗link4. Ingress has installed on argoCD, if not check 🔗link2.prepare `flink-operator.yaml`
Details
# Create/refresh the flink-kubernetes-operator ArgoCD Application.
# NOTE(review): the original snippet had lost all YAML indentation; nesting is
# reconstructed from the key order. spec.source.helm.version selects the Helm
# binary version ArgoCD uses to template the chart.
kubectl -n argocd apply -f - << 'EOF'
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: flink-operator
spec:
  syncPolicy:
    syncOptions:
      - CreateNamespace=true
  project: default
  source:
    repoURL: https://downloads.apache.org/flink/flink-kubernetes-operator-1.11.0
    chart: flink-kubernetes-operator
    targetRevision: 1.11.0
    helm:
      releaseName: flink-operator
      values: |
        image:
          repository: m.daocloud.io/ghcr.io/apache/flink-kubernetes-operator
          pullPolicy: IfNotPresent
          tag: "1.11.0"
      version: v3
  destination:
    server: https://kubernetes.default.svc
    namespace: flink
EOF
3.sync by argocd
Details
argocd app sync argocd/flink-operator
FAQ
Deploy GateKeeper Server
Official Website: https://open-policy-agent.github.io/gatekeeper/website/
Preliminary
- Kubernetes 版本必须大于
v1.16
Components
Gatekeeper 是基于 Open Policy Agent(OPA) 构建的 Kubernetes 准入控制器,它允许用户定义和实施自定义策略,以控制 Kubernetes 集群中资源的创建、更新和删除操作
- 核心组件
- 约束模板(Constraint Templates):定义策略的规则逻辑,使用 Rego 语言编写。它是策略的抽象模板,可以被多个约束实例(Constraint Instance)复用。
- 约束实例(Constraints Instance):基于约束模板创建的具体策略实例,指定了具体的参数和匹配规则,用于定义哪些资源需要应用该策略。
- 准入控制器(Admission Controller)(无需修改):拦截 Kubernetes API Server 的请求,根据定义的约束对请求进行评估,如果请求违反了任何约束,则拒绝该请求。
Features
约束管理
自定义约束模板:用户可以使用 Rego 语言编写自定义的约束模板,实现各种复杂的策略逻辑。
例如,可以定义策略要求所有的命名空间 NameSpace 必须设置特定的标签,或者限制某些命名空间只能使用特定的镜像。
约束模板复用:约束模板可以被多个约束实例复用,提高了策略的可维护性和复用性。
例如,可以创建一个通用的标签约束模板,然后在不同的命名空间 NameSpace 中创建不同的约束实例,要求不同的标签。
约束更新:当约束模板或约束发生更新时,Gatekeeper 会自动重新评估所有相关的资源,确保策略的实时生效。
资源控制
准入拦截:当有资源创建或更新请求时,Gatekeeper 会实时拦截请求,并根据策略进行评估。如果请求违反了策略,会立即拒绝请求,并返回详细的错误信息,帮助用户快速定位问题。
资源创建和更新限制:Gatekeeper 可以阻止不符合策略的资源创建和更新请求。
例如,如果定义了一个策略要求所有的 Deployment 必须设置资源限制(requests 和 limits),那么当用户尝试创建或更新一个没有设置资源限制的 Deployment 时,请求将被拒绝。
通过enforcementAction来控制,可选:dryrun | deny | warn
check https://open-policy-agent.github.io/gatekeeper-library/website/validation/containerlimits
资源类型过滤:可以通过约束的 match 字段指定需要应用策略的资源类型和命名空间。
例如,可以只对特定命名空间中的 Pod 应用策略,或者只对特定 API 组和版本的资源应用策略。
可以通过syncSet (同步配置)来指定过滤和忽略那些资源
合规性保证
行业标准和自定义规范:Gatekeeper 可以确保 Kubernetes 集群中的资源符合行业标准和管理员要求的内部的安全规范。
例如,可以定义策略要求所有的容器必须使用最新的安全补丁,或者要求所有的存储卷必须进行加密。
Gatekeeper 已经提供近50种各类资源限制的约束策略,可以通过访问https://open-policy-agent.github.io/gatekeeper-library/website/ 查看并获得
审计和报告:Gatekeeper 可以记录所有的策略评估结果,方便管理员进行审计和报告。通过查看审计日志,管理员可以了解哪些资源违反了策略,以及违反了哪些策略。
审计导出:审计日志可以导出并接入下游。
详细信息可以查看https://open-policy-agent.github.io/gatekeeper/website/docs/pubsub/
Installation
# Option 1: install Gatekeeper v3.18.2 straight from the release manifest.
kubectl apply -f https://raw.githubusercontent.com/open-policy-agent/gatekeeper/v3.18.2/deploy/gatekeeper.yaml
# Option 2: install via the official Helm chart into its own namespace.
helm repo add gatekeeper https://open-policy-agent.github.io/gatekeeper/charts
helm install gatekeeper/gatekeeper --name-template=gatekeeper --namespace gatekeeper-system --create-namespace
Make sure that:
- You have Docker version 20.10 or later installed.
- Your kubectl context is set to the desired installation cluster.
- You have a container registry you can write to that is readable by the target cluster.
# Clone the Gatekeeper sources for a from-source build.
git clone https://github.com/open-policy-agent/gatekeeper.git \
&& cd gatekeeper
- Build and push Gatekeeper image:
# Set your writable registry (the <...> is a placeholder, not literal shell),
# then build and push the multi-arch image directly to that registry.
export DESTINATION_GATEKEEPER_IMAGE=<add registry like "myregistry.docker.io/gatekeeper">
make docker-buildx REPOSITORY=$DESTINATION_GATEKEEPER_IMAGE OUTPUT_TYPE=type=registry
- And the deploy
make deploy REPOSITORY=$DESTINATION_GATEKEEPER_IMAGE
Install Gitea
Installation
Preliminary
1. Kubernetes has installed, if not check 🔗link2. Helm binary has installed, if not check 🔗link3. CertManager has installed, if not check 🔗link4. Ingress has installed, if not check 🔗link1.get helm repo
Details
# Add the official Gitea Helm repository, then refresh the local index.
helm repo add gitea-charts https://dl.gitea.com/charts/
helm repo update
2.install chart
Details
helm install gitea gitea-charts/gitea --generate-name
Using Mirror
# Alternative: install Gitea 12.1.3 from the mirror repository.
helm repo add ay-helm-mirror https://aaronyang0628.github.io/helm-chart-mirror/charts \
&& helm install ay-helm-mirror/gitea --generate-name --version 12.1.3
for more information, you can check 🔗https://aaronyang0628.github.io/helm-chart-mirror/
Preliminary
1. Kubernetes has installed, if not check 🔗link2. ArgoCD has installed, if not check 🔗link3. Helm binary has installed, if not check 🔗link4. Ingress has installed on argoCD, if not check 🔗link5. Minio has installed, if not check 🔗link1.prepare `gitea-admin-credentials`
# Ensure the "application" namespace exists (idempotent), then create the admin
# credentials secret referenced by the chart (gitea.admin.existingSecret) with
# a random 16-character password. The snippet appears twice because the source
# page renders it under two tabs; both variants are identical.
kubectl get namespaces application > /dev/null 2>&1 || kubectl create namespace application
kubectl -n application create secret generic gitea-admin-credentials \
--from-literal=username=gitea_admin \
--from-literal=password=$(tr -dc A-Za-z0-9 </dev/urandom | head -c 16)
kubectl get namespaces application > /dev/null 2>&1 || kubectl create namespace application
kubectl -n application create secret generic gitea-admin-credentials \
--from-literal=username=gitea_admin \
--from-literal=password=$(tr -dc A-Za-z0-9 </dev/urandom | head -c 16)
2.prepare `gitea.yaml`
# Gitea ArgoCD Application (save as gitea.yaml and apply with
# `kubectl -n argocd apply -f gitea.yaml`).
# NOTE(review): the original had lost all YAML indentation; the nesting below
# is reconstructed from the key order and the Gitea chart's values layout
# (postgresql.* follows the bundled Bitnami subchart structure) — verify
# against the chart's values schema for targetRevision 10.1.4.
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: gitea
spec:
  syncPolicy:
    syncOptions:
      - CreateNamespace=true
  project: default
  source:
    repoURL: https://dl.gitea.com/charts/
    chart: gitea
    targetRevision: 10.1.4
    helm:
      releaseName: gitea
      values: |
        image:
          registry: m.daocloud.io/docker.io
        service:
          http:
            type: NodePort
            port: 3000
            nodePort: 30300
          ssh:
            type: NodePort
            port: 22
            nodePort: 32022
        ingress:
          enabled: true
          ingressClassName: nginx
          annotations:
            kubernetes.io/ingress.class: nginx
            nginx.ingress.kubernetes.io/rewrite-target: /$1
            cert-manager.io/cluster-issuer: self-signed-ca-issuer
          hosts:
            - host: gitea.ay.dev
              paths:
                - path: /?(.*)
                  pathType: ImplementationSpecific
          tls:
            - secretName: gitea.ay.dev-tls
              hosts:
                - gitea.ay.dev
        persistence:
          enabled: true
          size: 8Gi
          storageClass: ""
        redis-cluster:
          enabled: false
        postgresql-ha:
          enabled: false
        postgresql:
          enabled: true
          architecture: standalone
          image:
            registry: m.daocloud.io/docker.io
          primary:
            persistence:
              enabled: false
              storageClass: ""
              size: 8Gi
          readReplicas:
            replicaCount: 1
            persistence:
              enabled: true
              storageClass: ""
              size: 8Gi
          backup:
            enabled: false
          volumePermissions:
            enabled: false
            image:
              registry: m.daocloud.io/docker.io
          metrics:
            enabled: false
            image:
              registry: m.daocloud.io/docker.io
        gitea:
          admin:
            # Username/password come from the pre-created secret above.
            existingSecret: gitea-admin-credentials
            email: aaron19940628@gmail.com
          config:
            database:
              DB_TYPE: postgres
            session:
              PROVIDER: db
            cache:
              ADAPTER: memory
            queue:
              TYPE: level
            indexer:
              ISSUE_INDEXER_TYPE: bleve
              REPO_INDEXER_ENABLED: true
            repository:
              MAX_CREATION_LIMIT: 10
              DISABLED_REPO_UNITS: "repo.wiki,repo.ext_wiki,repo.projects"
              DEFAULT_REPO_UNITS: "repo.code,repo.releases,repo.issues,repo.pulls"
            server:
              PROTOCOL: http
              LANDING_PAGE: login
              DOMAIN: gitea.ay.dev
              ROOT_URL: https://gitea.ay.dev:32443/
              SSH_DOMAIN: ssh.gitea.ay.dev
              SSH_PORT: 32022
              SSH_AUTHORIZED_PRINCIPALS_ALLOW: email
            admin:
              DISABLE_REGULAR_ORG_CREATION: true
            security:
              INSTALL_LOCK: true
            service:
              REGISTER_EMAIL_CONFIRM: true
              DISABLE_REGISTRATION: true
              ENABLE_NOTIFY_MAIL: false
              DEFAULT_ALLOW_CREATE_ORGANIZATION: false
              SHOW_MILESTONES_DASHBOARD_PAGE: false
            migrations:
              ALLOW_LOCALNETWORKS: true
            mailer:
              ENABLED: false
            i18n:
              LANGS: "en-US,zh-CN"
              NAMES: "English,简体中文"
            oauth2:
              ENABLE: false
  destination:
    server: https://kubernetes.default.svc
    namespace: application
3.apply to k8s
Details
kubectl -n argocd apply -f gitea.yaml
4.sync by argocd
Details
argocd app sync argocd/gitea
5.decode admin password
login 🔗https://gitea.ay.dev:32443/, using user gitea_admin and the password below.
Details
kubectl -n application get secret gitea-admin-credentials -o jsonpath='{.data.password}' | base64 -d