Subsections of 🪀Software
Application
Subsections of Application
Install Cert Manager
Installation
Preliminary
1. Kubernetes has been installed, if not check 🔗link
2. Helm binary has been installed, if not check 🔗link
1.get helm repo
helm repo add cert-manager-repo https://charts.jetstack.io
helm repo update
2.install chart
helm install cert-manager-repo/cert-manager --generate-name --version 1.17.2
helm repo add ay-helm-mirror https://aaronyang0628.github.io/helm-chart-mirror/charts \
&& helm install ay-helm-mirror/cert-manager --generate-name --version 1.17.2
for more information, you can check 🔗https://aaronyang0628.github.io/helm-chart-mirror/
Preliminary
1. Kubernetes has been installed, if not check 🔗link
2. ArgoCD has been installed, if not check 🔗link
3. Helm binary has been installed, if not check 🔗link
1.prepare `cert-manager.yaml`
kubectl -n argocd apply -f - << EOF
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: cert-manager
spec:
syncPolicy:
syncOptions:
- CreateNamespace=true
project: default
source:
repoURL: https://aaronyang0628.github.io/helm-chart-mirror/charts
chart: cert-manager
targetRevision: 1.17.2
helm:
releaseName: cert-manager
values: |
installCRDs: true
image:
repository: m.daocloud.io/quay.io/jetstack/cert-manager-controller
tag: v1.17.2
webhook:
image:
repository: m.daocloud.io/quay.io/jetstack/cert-manager-webhook
tag: v1.17.2
cainjector:
image:
repository: m.daocloud.io/quay.io/jetstack/cert-manager-cainjector
tag: v1.17.2
acmesolver:
image:
repository: m.daocloud.io/quay.io/jetstack/cert-manager-acmesolver
tag: v1.17.2
startupapicheck:
image:
repository: m.daocloud.io/quay.io/jetstack/cert-manager-startupapicheck
tag: v1.17.2
destination:
server: https://kubernetes.default.svc
namespace: basic-components
EOF
3.sync by argocd
argocd app sync argocd/cert-manager
4.prepare self-signed.yaml
kubectl apply -f - <<EOF
---
apiVersion: cert-manager.io/v1
kind: Issuer
metadata:
namespace: basic-components
name: self-signed-issuer
spec:
selfSigned: {}
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
namespace: basic-components
name: my-self-signed-ca
spec:
isCA: true
commonName: my-self-signed-ca
secretName: root-secret
privateKey:
algorithm: ECDSA
size: 256
issuerRef:
name: self-signed-issuer
kind: Issuer
group: cert-manager.io
---
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
name: self-signed-ca-issuer
spec:
ca:
secretName: root-secret
EOF
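5.verify issuance
If everything above applied cleanly, the Certificate should become Ready and cert-manager should have created the CA secret (resource names as defined above):
kubectl -n basic-components get issuer,certificate
kubectl -n basic-components get secret root-secret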
Preliminary
1. Docker|Podman|Buildah has been installed, if not check 🔗link
1.just run
docker run --name cert-manager -e ALLOW_EMPTY_PASSWORD=yes bitnami/cert-manager:latest
you can use an additional DaoCloud mirror image to accelerate your pulling, check Daocloud Proxy
docker run --name cert-manager \
-e ALLOW_EMPTY_PASSWORD=yes \
m.daocloud.io/docker.io/bitnami/cert-manager:latest
FAQ
Install Chart Museum
Installation
Preliminary
1. Kubernetes has been installed, if not check 🔗link
2. Helm binary has been installed, if not check 🔗link
1.get helm repo
helm repo add ay-helm-mirror https://aaronyang0628.github.io/helm-chart-mirror/charts
helm repo update
2.install chart
helm install ay-helm-mirror/chartmuseum --generate-name
helm repo add ay-helm-mirror https://aaronyang0628.github.io/helm-chart-mirror/charts \
&& helm install ay-helm-mirror/chartmuseum --generate-name
for more information, you can check 🔗https://aaronyang0628.github.io/helm-chart-mirror/
Preliminary
1. Kubernetes has been installed, if not check 🔗link
2. ArgoCD has been installed, if not check 🔗link
3. Helm binary has been installed, if not check 🔗link
4. Ingress has been installed on ArgoCD, if not check 🔗link
5. Minio has been installed, if not check 🔗link
1.prepare `chart-museum-credentials`
kubectl get namespaces basic-components > /dev/null 2>&1 || kubectl create namespace basic-components
kubectl -n basic-components create secret generic chart-museum-credentials \
--from-literal=username=admin \
--from-literal=password=$(tr -dc A-Za-z0-9 </dev/urandom | head -c 16)
kubectl get namespaces basic-components > /dev/null 2>&1 || kubectl create namespace basic-components
kubectl -n basic-components create secret generic chart-museum-credentials \
--from-literal=username=admin \
--from-literal=password=$(tr -dc A-Za-z0-9 </dev/urandom | head -c 16) \
--from-literal=aws_access_key_id=$(kubectl -n storage get secret minio-credentials -o jsonpath='{.data.rootUser}' | base64 -d) \
--from-literal=aws_secret_access_key=$(kubectl -n storage get secret minio-credentials -o jsonpath='{.data.rootPassword}' | base64 -d)
2.prepare `chart-museum.yaml`
with local storage:
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: chart-museum
spec:
syncPolicy:
syncOptions:
- CreateNamespace=true
project: default
source:
repoURL: https://chartmuseum.github.io/charts
chart: chartmuseum
targetRevision: 3.10.3
helm:
releaseName: chart-museum
values: |
replicaCount: 1
image:
repository: ghcr.io/helm/chartmuseum
env:
open:
DISABLE_API: false
STORAGE: local
AUTH_ANONYMOUS_GET: true
existingSecret: "chart-museum-credentials"
existingSecretMappings:
BASIC_AUTH_USER: "username"
BASIC_AUTH_PASS: "password"
persistence:
enabled: false
storageClass: ""
volumePermissions:
image:
registry: m.daocloud.io/docker.io
ingress:
enabled: true
ingressClassName: nginx
annotations:
cert-manager.io/cluster-issuer: self-signed-ca-issuer
nginx.ingress.kubernetes.io/rewrite-target: /$1
hosts:
- name: chartmuseum.ay.dev
path: /?(.*)
tls: true
tlsSecret: chartmuseum.ay.dev-tls
destination:
server: https://kubernetes.default.svc
namespace: basic-components
with MinIO (S3) storage:
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: chart-museum
spec:
syncPolicy:
syncOptions:
- CreateNamespace=true
project: default
source:
repoURL: https://chartmuseum.github.io/charts
chart: chartmuseum
targetRevision: 3.10.3
helm:
releaseName: chart-museum
values: |
replicaCount: 1
image:
repository: ghcr.io/helm/chartmuseum
env:
open:
DISABLE_API: false
STORAGE: amazon
STORAGE_AMAZON_ENDPOINT: http://minio-api.ay.dev:32080
STORAGE_AMAZON_BUCKET: chart-museum
STORAGE_AMAZON_PREFIX: charts
STORAGE_AMAZON_REGION: us-east-1
AUTH_ANONYMOUS_GET: true
existingSecret: "chart-museum-credentials"
existingSecretMappings:
BASIC_AUTH_USER: "username"
BASIC_AUTH_PASS: "password"
AWS_ACCESS_KEY_ID: "aws_access_key_id"
AWS_SECRET_ACCESS_KEY: "aws_secret_access_key"
persistence:
enabled: false
storageClass: ""
volumePermissions:
image:
registry: m.daocloud.io/docker.io
ingress:
enabled: true
ingressClassName: nginx
annotations:
cert-manager.io/cluster-issuer: self-signed-ca-issuer
nginx.ingress.kubernetes.io/rewrite-target: /$1
hosts:
- name: chartmuseum.ay.dev
path: /?(.*)
tls: true
tlsSecret: chartmuseum.ay.dev-tls
destination:
server: https://kubernetes.default.svc
namespace: basic-components
3.sync by argocd
argocd app sync argocd/chart-museum
install based on docker
echo "start from head is important"
Uploading a Chart Package
Follow the “install based on docker” section above to get ChartMuseum up and running at http://localhost:8080
First create mychart-0.1.0.tgz using the Helm CLI:
cd mychart/
helm package .
Upload mychart-0.1.0.tgz:
curl --data-binary "@mychart-0.1.0.tgz" http://localhost:8080/api/charts
If you’ve signed your package and generated a provenance file, upload it with:
curl --data-binary "@mychart-0.1.0.tgz.prov" http://localhost:8080/api/prov
Both files can also be uploaded at once (or one at a time) to the /api/charts route using the multipart/form-data format:
curl -F "chart=@mychart-0.1.0.tgz" -F "prov=@mychart-0.1.0.tgz.prov" http://localhost:8080/api/charts
You can also use the helm-push plugin:
helm cm-push mychart/ chartmuseum
Installing Charts into Kubernetes
Add the URL to your ChartMuseum installation to the local repository list:
helm repo add chartmuseum http://localhost:8080
Search for charts:
helm search repo chartmuseum/
Install chart:
helm install chartmuseum/mychart --generate-name
FAQ
Install Flink Operator
Installation
Preliminary
1. Kubernetes has been installed, if not check 🔗link
2. Helm has been installed, if not check 🔗link
3. Cert-manager has been installed and a ClusterIssuer named `self-signed-ca-issuer` exists, if not check 🔗link
1.get helm repo
helm repo add flink-operator-repo https://downloads.apache.org/flink/flink-kubernetes-operator-1.11.0/
helm repo update
latest version : 🔗https://flink.apache.org/downloads/#apache-flink-kubernetes-operator
2.install chart
helm install flink-kubernetes-operator flink-operator-repo/flink-kubernetes-operator --set image.repository=apache/flink-kubernetes-operator --set webhook.create=false
for more information, you can check 🔗https://nightlies.apache.org/flink/flink-kubernetes-operator-docs-main/zh/docs/try-flink-kubernetes-operator/quick-start/
Preliminary
1. Kubernetes has been installed, if not check 🔗link
2. ArgoCD has been installed, if not check 🔗link
3. Cert-manager has been installed on ArgoCD and a ClusterIssuer named `self-signed-ca-issuer` exists, if not check 🔗link
4. Ingress has been installed on ArgoCD, if not check 🔗link
2.prepare `flink-operator.yaml`
kubectl -n argocd apply -f - << EOF
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: flink-operator
spec:
syncPolicy:
syncOptions:
- CreateNamespace=true
project: default
source:
repoURL: https://downloads.apache.org/flink/flink-kubernetes-operator-1.11.0
chart: flink-kubernetes-operator
targetRevision: 1.11.0
helm:
releaseName: flink-operator
values: |
image:
repository: m.daocloud.io/ghcr.io/apache/flink-kubernetes-operator
pullPolicy: IfNotPresent
tag: "1.11.0"
version: v3
destination:
server: https://kubernetes.default.svc
namespace: flink
EOF
3.sync by argocd
argocd app sync argocd/flink-operator
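4.submit a sample job (optional)
To smoke-test the operator, you can apply the basic example from the operator quick-start; this is a sketch, and the image/flinkVersion may need to match your operator release:
kubectl -n flink apply -f - << EOF
apiVersion: flink.apache.org/v1beta1
kind: FlinkDeployment
metadata:
  name: basic-example
spec:
  image: flink:1.17
  flinkVersion: v1_17
  flinkConfiguration:
    taskmanager.numberOfTaskSlots: "2"
  serviceAccount: flink
  jobManager:
    resource:
      memory: "2048m"
      cpu: 1
  taskManager:
    resource:
      memory: "2048m"
      cpu: 1
  job:
    jarURI: local:///opt/flink/examples/streaming/StateMachineExample.jar
    parallelism: 2
    upgradeMode: stateless
EOF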
FAQ
Deploy GateKeeper Server
Official Website: https://open-policy-agent.github.io/gatekeeper/website/
Preliminary
- Kubernetes version must be greater than v1.16
Components
Gatekeeper is a Kubernetes admission controller built on Open Policy Agent (OPA). It lets users define and enforce custom policies to control create, update, and delete operations on resources in a Kubernetes cluster.
- Core components
- Constraint Templates: define the rule logic of a policy, written in the Rego language. A template is an abstract policy that can be reused by multiple Constraint Instances.
- Constraint Instances: concrete policy instances created from a constraint template; they specify the actual parameters and match rules, defining which resources the policy applies to.
- Admission Controller (no modification required): intercepts requests to the Kubernetes API Server and evaluates them against the defined constraints; any request that violates a constraint is rejected.
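As a concrete illustration of the template/instance split, here is the k8srequiredlabels example from the gatekeeper-library, which requires an owner label on every Namespace:
apiVersion: templates.gatekeeper.sh/v1
kind: ConstraintTemplate
metadata:
  name: k8srequiredlabels
spec:
  crd:
    spec:
      names:
        kind: K8sRequiredLabels
      validation:
        openAPIV3Schema:
          type: object
          properties:
            labels:
              type: array
              items:
                type: string
  targets:
    - target: admission.k8s.gatekeeper.sh
      rego: |
        package k8srequiredlabels
        violation[{"msg": msg}] {
          provided := {label | input.review.object.metadata.labels[label]}
          required := {label | label := input.parameters.labels[_]}
          missing := required - provided
          count(missing) > 0
          msg := sprintf("you must provide labels: %v", [missing])
        }
---
apiVersion: constraints.gatekeeper.sh/v1beta1
kind: K8sRequiredLabels
metadata:
  name: ns-must-have-owner
spec:
  match:
    kinds:
      - apiGroups: [""]
        kinds: ["Namespace"]
  parameters:
    labels: ["owner"]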
Features
Constraint management
Custom constraint templates: users can write their own constraint templates in Rego to implement complex policy logic.
For example, a policy can require every Namespace to carry specific labels, or restrict certain namespaces to specific images.
Constraint template reuse: one template can back multiple constraint instances, which keeps policies maintainable and reusable.
For example, a generic label template can be instantiated in different Namespaces, each instance requiring different labels.
Constraint updates: when a template or constraint is updated, Gatekeeper automatically re-evaluates all related resources so the policy takes effect in real time.
Resource control
Admission interception: when a resource is created or updated, Gatekeeper intercepts the request in real time and evaluates it against the policies. If the request violates a policy, it is rejected immediately with a detailed error message that helps users locate the problem quickly.
Blocking non-compliant creates and updates: Gatekeeper can reject resource requests that do not comply with policy.
For example, if a policy requires every Deployment to set resource requests and limits, any attempt to create or update a Deployment without them will be rejected.
This behavior is controlled by enforcementAction, one of: dryrun | deny | warn
check https://open-policy-agent.github.io/gatekeeper-library/website/validation/containerlimits
Resource type filtering: the match field of a constraint specifies the resource types and namespaces the policy applies to.
For example, a policy can target only Pods in specific namespaces, or only resources of specific API groups and versions.
A syncSet (sync configuration) can additionally specify which resources to sync and which to ignore.
Compliance guarantees
Industry standards and custom rules: Gatekeeper can ensure that cluster resources comply with industry standards and the internal security rules required by administrators.
For example, policies can require every container to run with the latest security patches, or every storage volume to be encrypted.
Gatekeeper already ships close to 50 constraint policies covering various resources; see https://open-policy-agent.github.io/gatekeeper-library/website/ to browse them.
Audit and reporting: Gatekeeper records all policy evaluation results for auditing and reporting. From the audit log, administrators can see which resources violated which policies.
Audit export: audit logs can be exported to downstream systems.
See https://open-policy-agent.github.io/gatekeeper/website/docs/pubsub/ for details.
Installation
kubectl apply -f https://raw.githubusercontent.com/open-policy-agent/gatekeeper/v3.18.2/deploy/gatekeeper.yaml
helm repo add gatekeeper https://open-policy-agent.github.io/gatekeeper/charts
helm install gatekeeper/gatekeeper --name-template=gatekeeper --namespace gatekeeper-system --create-namespace
Make sure that:
- You have Docker version 20.10 or later installed.
- Your kubectl context is set to the desired installation cluster.
- You have a container registry you can write to that is readable by the target cluster.
git clone https://github.com/open-policy-agent/gatekeeper.git \
&& cd gatekeeper
- Build and push Gatekeeper image:
export DESTINATION_GATEKEEPER_IMAGE=<add registry like "myregistry.docker.io/gatekeeper">
make docker-buildx REPOSITORY=$DESTINATION_GATEKEEPER_IMAGE OUTPUT_TYPE=type=registry
- And then deploy
make deploy REPOSITORY=$DESTINATION_GATEKEEPER_IMAGE
Binary
Subsections of Binary
Argo Workflow Binary
MIRROR="files.m.daocloud.io/"
VERSION=v3.5.4
curl -sSLo argo-linux-amd64.gz "https://${MIRROR}github.com/argoproj/argo-workflows/releases/download/${VERSION}/argo-linux-amd64.gz"
gunzip argo-linux-amd64.gz
chmod u+x argo-linux-amd64
mkdir -p ${HOME}/bin
mv -f argo-linux-amd64 ${HOME}/bin/argo
rm -f argo-linux-amd64.gz
ArgoCD Binary
MIRROR="files.m.daocloud.io/"
VERSION=v2.9.3
[ $(uname -m) = x86_64 ] && curl -sSLo argocd "https://${MIRROR}github.com/argoproj/argo-cd/releases/download/${VERSION}/argocd-linux-amd64"
[ $(uname -m) = aarch64 ] && curl -sSLo argocd "https://${MIRROR}github.com/argoproj/argo-cd/releases/download/${VERSION}/argocd-linux-arm64"
chmod u+x argocd
mkdir -p ${HOME}/bin
mv -f argocd ${HOME}/bin
[Optional] add to PATH
cat >> ~/.bashrc << 'EOF'
export PATH=$PATH:${HOME}/bin
EOF
source ~/.bashrc
Golang Binary
# sudo rm -rf /usr/local/go # remove any previous Go installation first
wget https://go.dev/dl/go1.24.4.linux-amd64.tar.gz
tar -C /usr/local -xzf go1.24.4.linux-amd64.tar.gz
vim ~/.bashrc
export PATH=$PATH:/usr/local/go/bin
source ~/.bashrc
rm -rf ./go1.24.4.linux-amd64.tar.gz
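Verify the toolchain is on PATH:
go version
# expected output similar to: go version go1.24.4 linux/amd64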
Helm Binary
ARCH_IN_FILE_NAME=linux-amd64
FILE_NAME=helm-v3.18.3-${ARCH_IN_FILE_NAME}.tar.gz
curl -sSLo ${FILE_NAME} "https://files.m.daocloud.io/get.helm.sh/${FILE_NAME}"
tar zxf ${FILE_NAME}
mkdir -p ${HOME}/bin
mv -f ${ARCH_IN_FILE_NAME}/helm ${HOME}/bin
rm -rf ./${FILE_NAME}
rm -rf ./${ARCH_IN_FILE_NAME}
chmod u+x ${HOME}/bin/helm
JQ Binary
JQ_VERSION=1.7
JQ_BINARY=jq-linux-amd64
wget https://github.com/jqlang/jq/releases/download/jq-${JQ_VERSION}/${JQ_BINARY} -O /usr/bin/jq && chmod +x /usr/bin/jq
Kind Binary
MIRROR="files.m.daocloud.io/"
VERSION=v0.29.0
[ $(uname -m) = x86_64 ] && curl -sSLo kind "https://${MIRROR}github.com/kubernetes-sigs/kind/releases/download/${VERSION}/kind-linux-amd64"
[ $(uname -m) = aarch64 ] && curl -sSLo kind "https://${MIRROR}github.com/kubernetes-sigs/kind/releases/download/${VERSION}/kind-linux-arm64"
chmod u+x kind
mkdir -p ${HOME}/bin
mv -f kind ${HOME}/bin
Krew Binary
cd "$(mktemp -d)" &&
OS="$(uname | tr '[:upper:]' '[:lower:]')" &&
ARCH="$(uname -m | sed -e 's/x86_64/amd64/' -e 's/\(arm\)\(64\)\?.*/\1\2/' -e 's/aarch64$/arm64/')" &&
KREW="krew-${OS}_${ARCH}" &&
curl -fsSLO "https://github.com/kubernetes-sigs/krew/releases/latest/download/${KREW}.tar.gz" &&
tar zxvf "${KREW}.tar.gz" &&
./"${KREW}" install krew
Kubectl Binary
MIRROR="files.m.daocloud.io/"
VERSION=$(curl -L -s https://${MIRROR}dl.k8s.io/release/stable.txt)
[ $(uname -m) = x86_64 ] && curl -sSLo kubectl "https://${MIRROR}dl.k8s.io/release/${VERSION}/bin/linux/amd64/kubectl"
[ $(uname -m) = aarch64 ] && curl -sSLo kubectl "https://${MIRROR}dl.k8s.io/release/${VERSION}/bin/linux/arm64/kubectl"
chmod u+x kubectl
mkdir -p ${HOME}/bin
mv -f kubectl ${HOME}/bin
Maven Binary
wget https://dlcdn.apache.org/maven/maven-3/3.9.6/binaries/apache-maven-3.9.6-bin.tar.gz
tar xzf apache-maven-3.9.6-bin.tar.gz -C /usr/local
ln -sfn /usr/local/apache-maven-3.9.6/bin/mvn /root/bin/mvn
export PATH=$PATH:/usr/local/apache-maven-3.9.6/bin
source ~/.bashrc
Minikube Binary
MIRROR="files.m.daocloud.io/"
[ $(uname -m) = x86_64 ] && curl -sSLo minikube "https://${MIRROR}storage.googleapis.com/minikube/releases/latest/minikube-linux-amd64"
[ $(uname -m) = aarch64 ] && curl -sSLo minikube "https://${MIRROR}storage.googleapis.com/minikube/releases/latest/minikube-linux-arm64"
chmod u+x minikube
mkdir -p ${HOME}/bin
mv -f minikube ${HOME}/bin
Open Java
mkdir -p /etc/apt/keyrings && \
wget -qO - https://packages.adoptium.net/artifactory/api/gpg/key/public | gpg --dearmor -o /etc/apt/keyrings/adoptium.gpg && \
echo "deb [signed-by=/etc/apt/keyrings/adoptium.gpg arch=amd64] https://packages.adoptium.net/artifactory/deb $(awk -F= '/^VERSION_CODENAME/{print$2}' /etc/os-release) main" | tee /etc/apt/sources.list.d/adoptium.list > /dev/null && \
apt-get update && \
apt-get install -y temurin-21-jdk && \
apt-get clean && \
rm -rf /var/lib/apt/lists/*
YQ Binary
YQ_VERSION=v4.40.5
YQ_BINARY=yq_linux_amd64
wget https://github.com/mikefarah/yq/releases/download/${YQ_VERSION}/${YQ_BINARY}.tar.gz -O - | tar xz && mv ${YQ_BINARY} /usr/bin/yq
CICD
Articles
FAQ
Subsections of CICD
Install Argo CD
Preliminary
1. install argoCD binary
2. prepare `argocd.values.yaml` and install components
crds:
install: true
keep: false
global:
revisionHistoryLimit: 3
image:
repository: m.daocloud.io/quay.io/argoproj/argocd
imagePullPolicy: IfNotPresent
redis:
enabled: true
image:
repository: m.daocloud.io/docker.io/library/redis
exporter:
enabled: false
image:
repository: m.daocloud.io/bitnami/redis-exporter
metrics:
enabled: false
redis-ha:
enabled: false
image:
repository: m.daocloud.io/docker.io/library/redis
configmapTest:
repository: m.daocloud.io/docker.io/koalaman/shellcheck
haproxy:
enabled: false
image:
repository: m.daocloud.io/docker.io/library/haproxy
exporter:
enabled: false
image: m.daocloud.io/docker.io/oliver006/redis_exporter
dex:
enabled: true
image:
repository: m.daocloud.io/ghcr.io/dexidp/dex
helm install argo-cd argo-cd \
--namespace argocd \
--create-namespace \
--version 5.46.7 \
--repo https://aaronyang0628.github.io/helm-chart-mirror/charts \
--values argocd.values.yaml \
--atomic
Alternatively, you can install Argo CD with the default manifests:
kubectl create namespace argocd \
&& kubectl apply -n argocd -f https://raw.githubusercontent.com/argoproj/argo-cd/stable/manifests/install.yaml
4. prepare argocd-server-external.yaml
if Argo CD was installed via Helm:
apiVersion: v1
kind: Service
metadata:
labels:
app.kubernetes.io/component: server
app.kubernetes.io/instance: argo-cd
app.kubernetes.io/name: argocd-server-external
app.kubernetes.io/part-of: argocd
app.kubernetes.io/version: v2.8.4
name: argocd-server-external
spec:
ports:
- name: https
port: 443
protocol: TCP
targetPort: 8080
nodePort: 30443
selector:
app.kubernetes.io/instance: argo-cd
app.kubernetes.io/name: argocd-server
type: NodePort
if Argo CD was installed via the official manifests:
apiVersion: v1
kind: Service
metadata:
labels:
app.kubernetes.io/component: server
app.kubernetes.io/instance: argo-cd
app.kubernetes.io/name: argocd-server-external
app.kubernetes.io/part-of: argocd
app.kubernetes.io/version: v2.8.4
name: argocd-server-external
spec:
ports:
- name: https
port: 443
protocol: TCP
targetPort: 8080
nodePort: 30443
selector:
app.kubernetes.io/name: argocd-server
type: NodePort
5. create external service
kubectl -n argocd apply -f argocd-server-external.yaml
6. get argocd initialized password
kubectl -n argocd get secret argocd-initial-admin-secret -o jsonpath="{.data.password}" | base64 -d
7. login argocd
ARGOCD_PASS=$(kubectl -n argocd get secret argocd-initial-admin-secret -o jsonpath="{.data.password}" | base64 -d)
MASTER_IP=$(kubectl get nodes --selector=node-role.kubernetes.io/control-plane -o jsonpath='{$.items[0].status.addresses[?(@.type=="InternalIP")].address}')
argocd login --insecure --username admin $MASTER_IP:30443 --password $ARGOCD_PASS
open https://<$local_ip>:30443 (or https://localhost:30443)
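8. [Optional] rotate the initial admin password
Once logged in, you can replace the generated password with one of your own (standard argocd CLI; the placeholder is yours to fill):
argocd account update-password --current-password $ARGOCD_PASS --new-password <$new_password>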
Install Argo WorkFlow
Preliminary
- Kubernetes has been installed, if not check 🔗link
- Argo CD has been installed, if not check 🔗link
- cert-manager has been installed on Argo CD and a ClusterIssuer named self-signed-ca-issuer exists, if not check 🔗link
1. prepare argo-workflows.yaml
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: argo-workflows
spec:
syncPolicy:
syncOptions:
- CreateNamespace=true
project: default
source:
repoURL: https://argoproj.github.io/argo-helm
chart: argo-workflows
targetRevision: 0.40.11
helm:
releaseName: argo-workflows
values: |
crds:
install: true
keep: false
singleNamespace: false
controller:
image:
registry: m.daocloud.io/quay.io
workflowNamespaces:
- business-workflows
executor:
image:
registry: m.daocloud.io/quay.io
workflow:
serviceAccount:
create: true
rbac:
create: true
server:
enabled: true
image:
registry: m.daocloud.io/quay.io
ingress:
enabled: true
ingressClassName: nginx
annotations:
cert-manager.io/cluster-issuer: self-signed-ca-issuer
nginx.ingress.kubernetes.io/rewrite-target: /$1
hosts:
- argo-workflows.dev.geekcity.tech
paths:
- /?(.*)
tls:
- secretName: argo-workflows-tls
hosts:
- argo-workflows.dev.geekcity.tech
authModes:
- server
sso:
enabled: false
destination:
server: https://kubernetes.default.svc
namespace: workflows
2. install argo workflow binary
3. create workflow related namespace
kubectl get namespace business-workflows > /dev/null 2>&1 || kubectl create namespace business-workflows
4. apply to k8s
kubectl -n argocd apply -f argo-workflows.yaml
5. sync by argocd
argocd app sync argocd/argo-workflows
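Optionally, submit the upstream hello-world example so the commands in the next step have a workflow to inspect (URL assumes the argoproj/argo-workflows master branch; the image pull may need a mirror in restricted networks):
argo -n business-workflows submit --serviceaccount argo-workflow \
https://raw.githubusercontent.com/argoproj/argo-workflows/master/examples/hello-world.yaml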
6. check workflow status
# list all flows
argo -n business-workflows list
# get specific flow status
argo -n business-workflows get <$flow_name>
# get specific flow log
argo -n business-workflows logs <$flow_name>
# get specific flow log continuously
argo -n business-workflows logs <$flow_name> --watch
Container
Articles
FAQ
Subsections of Container
Install Buildah
Reference
- you can directly install buildah following the 🔗containers/buildah install guide.
Installation
If you already have something wrong with apt update
, please check the following 🔗link first.
Fedora:
sudo dnf -y install buildah
CentOS/RHEL:
sudo yum -y install buildah
Ubuntu/Debian:
sudo apt-get update \
&& sudo apt-get -y install buildah
- Verify that the installation is successful:
buildah --version
Info
- Buildah stores images and containers in
/var/lib/containers
(rootless installs use ~/.local/share/containers)
Mirror
You can configure registry mirrors in /etc/containers/registries.conf
[[registry]]
prefix = "docker.io"
location = "docker.io"
[[registry.mirror]]
location = "m.daocloud.io/docker.io"
Install Docker Engine
Reference
- you can directly install docker engine from 🐳docker official website.
Installation
If you already have something wrong with apt update, please check the following 🔗link; adding the docker source won't solve that problem.
sudo dnf update -y
sudo dnf config-manager --add-repo=https://download.docker.com/linux/fedora/docker-ce.repo
sudo dnf install docker-ce docker-ce-cli containerd.io
Once the installation is complete, start the Docker service
sudo systemctl enable docker
sudo systemctl start docker
sudo yum install -y yum-utils
sudo yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
sudo yum install docker-ce --nobest --allowerasing -y
Once the installation is complete, start the Docker service
sudo systemctl enable docker
sudo systemctl start docker
- Set up Docker’s apt repository.
# Add Docker's official GPG key:
sudo apt-get update
sudo apt-get install ca-certificates curl
sudo install -m 0755 -d /etc/apt/keyrings
sudo curl -fsSL https://download.docker.com/linux/ubuntu/gpg -o /etc/apt/keyrings/docker.asc
sudo chmod a+r /etc/apt/keyrings/docker.asc
# Add the repository to Apt sources:
echo \
"deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.asc] https://download.docker.com/linux/ubuntu \
$(. /etc/os-release && echo "${UBUNTU_CODENAME:-$VERSION_CODENAME}") stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null
sudo apt-get update
- Install the Docker packages.
latest version
sudo apt-get install docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin
specific version
apt-cache madison docker-ce | awk '{ print $3 }'
export DOCKER_VERSION=5:28.2.1-1~XXXXX
sudo apt-get install docker-ce=$DOCKER_VERSION docker-ce-cli=$DOCKER_VERSION containerd.io docker-buildx-plugin docker-compose-plugin
- Verify that the installation is successful by running the hello-world image:
sudo docker run hello-world
Info
- Docker Image saved in
/var/lib/docker
Mirror
You can modify /etc/docker/daemon.json
{
"registry-mirrors": ["<$mirror_url>"]
}
for example:
https://docker.mirrors.ustc.edu.cn
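After editing daemon.json, restart the daemon so the mirror takes effect:
sudo systemctl daemon-reload
sudo systemctl restart docker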
Install Podman
Reference
- you can directly install podman from the 🔗podman official website.
Installation
If you already have something wrong with apt update, please check the following 🔗link first.
sudo dnf update -y
sudo dnf -y install podman
sudo yum install -y podman
sudo apt-get update
sudo apt-get -y install podman
Run Params
start a container
podman run [params]
--rm
: remove the container automatically when it exits
-v
: mount a volume
Example
podman run --rm \
-v /root/kserve/iris-input.json:/tmp/iris-input.json \
--privileged \
-e MODEL_NAME=sklearn-iris \
-e INPUT_PATH=/tmp/iris-input.json \
-e SERVICE_HOSTNAME=sklearn-iris.kserve-test.example.com \
-it m.daocloud.io/docker.io/library/golang:1.22 sh -c "command A; command B; exec bash"
Database
Subsections of Database
Install Clickhouse
Installation
Preliminary
1. Kubernetes has been installed, if not check 🔗link
2. argoCD has been installed, if not check 🔗link
3. cert-manager has been installed on ArgoCD and a ClusterIssuer named `self-signed-ca-issuer` exists, if not check 🔗link
1.prepare admin credentials secret
kubectl get namespaces database > /dev/null 2>&1 || kubectl create namespace database
kubectl -n database create secret generic clickhouse-admin-credentials \
--from-literal=password=$(tr -dc A-Za-z0-9 </dev/urandom | head -c 16)
2.prepare `deploy-clickhouse.yaml`
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: clickhouse
spec:
syncPolicy:
syncOptions:
- CreateNamespace=true
project: default
source:
repoURL: https://charts.bitnami.com/bitnami
chart: clickhouse
targetRevision: 4.5.1
helm:
releaseName: clickhouse
values: |
serviceAccount:
name: clickhouse
image:
registry: m.daocloud.io/docker.io
pullPolicy: IfNotPresent
volumePermissions:
enabled: false
image:
registry: m.daocloud.io/docker.io
pullPolicy: IfNotPresent
zookeeper:
enabled: true
image:
registry: m.daocloud.io/docker.io
pullPolicy: IfNotPresent
replicaCount: 3
persistence:
enabled: true
storageClass: nfs-external
size: 8Gi
volumePermissions:
enabled: false
image:
registry: m.daocloud.io/docker.io
pullPolicy: IfNotPresent
shards: 2
replicaCount: 3
ingress:
enabled: true
annotations:
cert-manager.io/cluster-issuer: self-signed-ca-issuer
nginx.ingress.kubernetes.io/rewrite-target: /$1
hostname: clickhouse.dev.geekcity.tech
ingressClassName: nginx
path: /?(.*)
tls: true
persistence:
enabled: false
resources:
requests:
cpu: 2
memory: 512Mi
limits:
cpu: 3
memory: 1024Mi
auth:
username: admin
existingSecret: clickhouse-admin-credentials
existingSecretKey: password
metrics:
enabled: true
image:
registry: m.daocloud.io/docker.io
pullPolicy: IfNotPresent
serviceMonitor:
enabled: true
namespace: monitor
jobLabel: clickhouse
selector:
app.kubernetes.io/name: clickhouse
app.kubernetes.io/instance: clickhouse
labels:
release: prometheus-stack
extraDeploy:
- |
apiVersion: apps/v1
kind: Deployment
metadata:
name: clickhouse-tool
namespace: database
labels:
app.kubernetes.io/name: clickhouse-tool
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: clickhouse-tool
template:
metadata:
labels:
app.kubernetes.io/name: clickhouse-tool
spec:
containers:
- name: clickhouse-tool
image: m.daocloud.io/docker.io/clickhouse/clickhouse-server:23.11.5.29-alpine
imagePullPolicy: IfNotPresent
env:
- name: CLICKHOUSE_USER
value: admin
- name: CLICKHOUSE_PASSWORD
valueFrom:
secretKeyRef:
key: password
name: clickhouse-admin-credentials
- name: CLICKHOUSE_HOST
value: csst-clickhouse.csst
- name: CLICKHOUSE_PORT
value: "9000"
- name: TZ
value: Asia/Shanghai
command:
- tail
args:
- -f
- /etc/hosts
destination:
server: https://kubernetes.default.svc
namespace: database
3.deploy clickhouse
kubectl -n argocd apply -f deploy-clickhouse.yaml
4.sync by argocd
argocd app sync argocd/clickhouse
5.prepare `clickhouse-interface.yaml`
apiVersion: v1
kind: Service
metadata:
labels:
app.kubernetes.io/component: clickhouse
app.kubernetes.io/instance: clickhouse
name: clickhouse-interface
spec:
ports:
- name: http
port: 8123
protocol: TCP
targetPort: http
nodePort: 31567
- name: tcp
port: 9000
protocol: TCP
targetPort: tcp
nodePort: 32005
selector:
app.kubernetes.io/component: clickhouse
app.kubernetes.io/instance: clickhouse
app.kubernetes.io/name: clickhouse
type: NodePort
6.apply to k8s
kubectl -n database apply -f clickhouse-interface.yaml
7.extract clickhouse admin credentials
kubectl -n database get secret clickhouse-admin-credentials -o jsonpath='{.data.password}' | base64 -d
8.invoke http api
add `$K8S_MASTER_IP clickhouse.dev.geekcity.tech` to **/etc/hosts**
CK_PASS=$(kubectl -n database get secret clickhouse-admin-credentials -o jsonpath='{.data.password}' | base64 -d)
echo 'SELECT version()' | curl -k "https://admin:${CK_PASS}@clickhouse.dev.geekcity.tech:32443/" --data-binary @-
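Any SQL statement can be posted the same way; for example, creating and then listing a table (the table name is illustrative):
CK_PASS=$(kubectl -n database get secret clickhouse-admin-credentials -o jsonpath='{.data.password}' | base64 -d)
echo 'CREATE TABLE IF NOT EXISTS default.demo (id UInt64, ts DateTime) ENGINE = MergeTree ORDER BY id' | curl -k "https://admin:${CK_PASS}@clickhouse.dev.geekcity.tech:32443/" --data-binary @-
echo 'SHOW TABLES FROM default' | curl -k "https://admin:${CK_PASS}@clickhouse.dev.geekcity.tech:32443/" --data-binary @-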
Preliminary
1. Docker has been installed, if not check 🔗link
you can use an additional DaoCloud mirror image to accelerate your pulling, check Daocloud Proxy
1.init server
1.init server
mkdir -p clickhouse/{data,logs}
podman run --rm \
--ulimit nofile=262144:262144 \
--name clickhouse-server \
-p 18123:8123 \
-p 19000:9000 \
-v $(pwd)/clickhouse/data:/var/lib/clickhouse \
-v $(pwd)/clickhouse/logs:/var/log/clickhouse-server \
-e CLICKHOUSE_DB=my_database \
-e CLICKHOUSE_DEFAULT_ACCESS_MANAGEMENT=1 \
-e CLICKHOUSE_USER=ayayay \
-e CLICKHOUSE_PASSWORD=123456 \
-d m.daocloud.io/docker.io/clickhouse/clickhouse-server:23.11.5.29-alpine
2.check dashboard
And then you can visit 🔗http://localhost:18123
3.use cli api
And then you can visit 🔗http://localhost:19000
podman run --rm \
--entrypoint clickhouse-client \
-it m.daocloud.io/docker.io/clickhouse/clickhouse-server:23.11.5.29-alpine \
--host host.containers.internal \
--port 19000 \
--user ayayay \
--password 123456 \
--query "select version()"
4.use visual client
podman run --rm -p 8080:80 -d m.daocloud.io/docker.io/spoonest/clickhouse-tabix-web-client:stable
Preliminary
1. Kubernetes has been installed, if not check 🔗link
2. ArgoCD has been installed, if not check 🔗link
3. Argo Workflow has been installed, if not check 🔗link
1.prepare `argocd-login-credentials`
kubectl get namespaces business-workflows > /dev/null 2>&1 || kubectl create namespace business-workflows
kubectl -n business-workflows create secret generic argocd-login-credentials \
--from-literal=username=admin \
--from-literal=password=$(kubectl -n argocd get secret argocd-initial-admin-secret -o jsonpath='{.data.password}' | base64 -d)
2.apply rolebinding to k8s
kubectl apply -f - <<EOF
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: application-administrator
rules:
- apiGroups:
- argoproj.io
resources:
- applications
verbs:
- '*'
- apiGroups:
- apps
resources:
- deployments
verbs:
- '*'
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: application-administration
namespace: argocd
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: application-administrator
subjects:
- kind: ServiceAccount
name: argo-workflow
namespace: business-workflows
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: application-administration
namespace: application
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: application-administrator
subjects:
- kind: ServiceAccount
name: argo-workflow
namespace: business-workflows
EOF
3.prepare clickhouse admin credentials secret
kubectl get namespace application > /dev/null 2>&1 || kubectl create namespace application
kubectl -n application create secret generic clickhouse-admin-credentials \
--from-literal=password=$(tr -dc A-Za-z0-9 </dev/urandom | head -c 16)
4.prepare deploy-clickhouse-flow.yaml
apiVersion: argoproj.io/v1alpha1
kind: Workflow
metadata:
generateName: deploy-argocd-app-ck-
spec:
entrypoint: entry
artifactRepositoryRef:
configmap: artifact-repositories
key: default-artifact-repository
serviceAccountName: argo-workflow
templates:
- name: entry
inputs:
parameters:
- name: argocd-server
value: argo-cd-argocd-server.argocd:443
- name: insecure-option
value: --insecure
dag:
tasks:
- name: apply
template: apply
- name: prepare-argocd-binary
template: prepare-argocd-binary
dependencies:
- apply
- name: sync
dependencies:
- prepare-argocd-binary
template: sync
arguments:
artifacts:
- name: argocd-binary
from: "{{tasks.prepare-argocd-binary.outputs.artifacts.argocd-binary}}"
parameters:
- name: argocd-server
value: "{{inputs.parameters.argocd-server}}"
- name: insecure-option
value: "{{inputs.parameters.insecure-option}}"
- name: wait
dependencies:
- sync
template: wait
arguments:
artifacts:
- name: argocd-binary
from: "{{tasks.prepare-argocd-binary.outputs.artifacts.argocd-binary}}"
parameters:
- name: argocd-server
value: "{{inputs.parameters.argocd-server}}"
- name: insecure-option
value: "{{inputs.parameters.insecure-option}}"
- name: apply
resource:
action: apply
manifest: |
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: app-clickhouse
namespace: argocd
spec:
syncPolicy:
syncOptions:
- CreateNamespace=true
project: default
source:
repoURL: https://charts.bitnami.com/bitnami
chart: clickhouse
targetRevision: 4.5.3
helm:
releaseName: app-clickhouse
values: |
image:
registry: docker.io
repository: bitnami/clickhouse
tag: 23.12.3-debian-11-r0
pullPolicy: IfNotPresent
service:
type: ClusterIP
volumePermissions:
enabled: false
image:
registry: m.daocloud.io/docker.io
pullPolicy: IfNotPresent
ingress:
enabled: true
ingressClassName: nginx
annotations:
cert-manager.io/cluster-issuer: self-signed-ca-issuer
nginx.ingress.kubernetes.io/rewrite-target: /$1
path: /?(.*)
hostname: clickhouse.dev.geekcity.tech
tls: true
shards: 2
replicaCount: 3
persistence:
enabled: false
auth:
username: admin
existingSecret: clickhouse-admin-credentials
existingSecretKey: password
zookeeper:
enabled: true
image:
registry: m.daocloud.io/docker.io
repository: bitnami/zookeeper
tag: 3.8.3-debian-11-r8
pullPolicy: IfNotPresent
replicaCount: 3
persistence:
enabled: false
volumePermissions:
enabled: false
image:
registry: m.daocloud.io/docker.io
pullPolicy: IfNotPresent
destination:
server: https://kubernetes.default.svc
namespace: application
- name: prepare-argocd-binary
inputs:
artifacts:
- name: argocd-binary
path: /tmp/argocd
mode: 755
http:
url: https://files.m.daocloud.io/github.com/argoproj/argo-cd/releases/download/v2.9.3/argocd-linux-amd64
outputs:
artifacts:
- name: argocd-binary
path: "{{inputs.artifacts.argocd-binary.path}}"
container:
image: m.daocloud.io/docker.io/library/fedora:39
command:
- sh
- -c
args:
- |
ls -l {{inputs.artifacts.argocd-binary.path}}
- name: sync
inputs:
artifacts:
- name: argocd-binary
path: /usr/local/bin/argocd
parameters:
- name: argocd-server
- name: insecure-option
value: ""
container:
image: m.daocloud.io/docker.io/library/fedora:39
env:
- name: ARGOCD_USERNAME
valueFrom:
secretKeyRef:
name: argocd-login-credentials
key: username
- name: ARGOCD_PASSWORD
valueFrom:
secretKeyRef:
name: argocd-login-credentials
key: password
- name: WITH_PRUNE_OPTION
value: --prune
command:
- sh
- -c
args:
- |
set -e
export ARGOCD_SERVER={{inputs.parameters.argocd-server}}
export INSECURE_OPTION={{inputs.parameters.insecure-option}}
export ARGOCD_USERNAME=${ARGOCD_USERNAME:-admin}
argocd login ${INSECURE_OPTION} --username ${ARGOCD_USERNAME} --password ${ARGOCD_PASSWORD} ${ARGOCD_SERVER}
argocd app sync argocd/app-clickhouse ${WITH_PRUNE_OPTION} --timeout 300
- name: wait
inputs:
artifacts:
- name: argocd-binary
path: /usr/local/bin/argocd
parameters:
- name: argocd-server
- name: insecure-option
value: ""
container:
image: m.daocloud.io/docker.io/library/fedora:39
env:
- name: ARGOCD_USERNAME
valueFrom:
secretKeyRef:
name: argocd-login-credentials
key: username
- name: ARGOCD_PASSWORD
valueFrom:
secretKeyRef:
name: argocd-login-credentials
key: password
command:
- sh
- -c
args:
- |
set -e
export ARGOCD_SERVER={{inputs.parameters.argocd-server}}
export INSECURE_OPTION={{inputs.parameters.insecure-option}}
export ARGOCD_USERNAME=${ARGOCD_USERNAME:-admin}
argocd login ${INSECURE_OPTION} --username ${ARGOCD_USERNAME} --password ${ARGOCD_PASSWORD} ${ARGOCD_SERVER}
argocd app wait argocd/app-clickhouse
5.submit to argo workflow client
argo -n business-workflows submit deploy-clickhouse-flow.yaml
6.extract clickhouse admin credentials
kubectl -n application get secret clickhouse-admin-credentials -o jsonpath='{.data.password}' | base64 -d
7.invoke http api
add `$K8S_MASTER_IP clickhouse.dev.geekcity.tech` to **/etc/hosts**
CK_PASSWORD=$(kubectl -n application get secret clickhouse-admin-credentials -o jsonpath='{.data.password}' | base64 -d) && echo 'SELECT version()' | curl -k "https://admin:${CK_PASSWORD}@clickhouse.dev.geekcity.tech/" --data-binary @-
8.create external interface
kubectl -n application apply -f - <<EOF
apiVersion: v1
kind: Service
metadata:
labels:
app.kubernetes.io/component: clickhouse
app.kubernetes.io/instance: app-clickhouse
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: clickhouse
app.kubernetes.io/version: 23.12.2
argocd.argoproj.io/instance: app-clickhouse
helm.sh/chart: clickhouse-4.5.3
name: app-clickhouse-service-external
spec:
ports:
- name: tcp
port: 9000
protocol: TCP
targetPort: tcp
nodePort: 30900
selector:
app.kubernetes.io/component: clickhouse
app.kubernetes.io/instance: app-clickhouse
app.kubernetes.io/name: clickhouse
type: NodePort
EOF
FAQ
Install ElasticSearch
Preliminary
- Kubernetes has been installed, if not check 🔗link
- argoCD has been installed, if not check 🔗link
- ingress has been installed on ArgoCD, if not check 🔗link
- cert-manager has been installed on ArgoCD and a ClusterIssuer named self-signed-ca-issuer exists, if not check 🔗link
Steps
1. prepare elastic-search.yaml
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: elastic-search
spec:
syncPolicy:
syncOptions:
- CreateNamespace=true
project: default
source:
repoURL: https://charts.bitnami.com/bitnami
chart: elasticsearch
targetRevision: 19.11.3
helm:
releaseName: elastic-search
values: |
global:
kibanaEnabled: true
clusterName: elastic
image:
registry: m.zjvis.net/docker.io
pullPolicy: IfNotPresent
security:
enabled: false
service:
type: ClusterIP
ingress:
enabled: true
annotations:
cert-manager.io/cluster-issuer: self-signed-ca-issuer
nginx.ingress.kubernetes.io/rewrite-target: /$1
hostname: elastic-search.dev.tech
ingressClassName: nginx
path: /?(.*)
tls: true
master:
masterOnly: false
replicaCount: 1
persistence:
enabled: false
resources:
requests:
cpu: 2
memory: 1024Mi
limits:
cpu: 4
memory: 4096Mi
heapSize: 2g
data:
replicaCount: 0
persistence:
enabled: false
coordinating:
replicaCount: 0
ingest:
enabled: true
replicaCount: 0
service:
enabled: false
type: ClusterIP
ingress:
enabled: false
metrics:
enabled: false
image:
registry: m.zjvis.net/docker.io
pullPolicy: IfNotPresent
volumePermissions:
enabled: false
image:
registry: m.zjvis.net/docker.io
pullPolicy: IfNotPresent
sysctlImage:
enabled: true
registry: m.zjvis.net/docker.io
pullPolicy: IfNotPresent
kibana:
elasticsearch:
hosts:
- '{{ include "elasticsearch.service.name" . }}'
port: '{{ include "elasticsearch.service.ports.restAPI" . }}'
esJavaOpts: "-Xmx2g -Xms2g"
destination:
server: https://kubernetes.default.svc
namespace: application
2. apply to k8s
kubectl -n argocd apply -f elastic-search.yaml
3. sync by argocd
argocd app sync argocd/elastic-search
[Optional] Test REST API call
add $K8S_MASTER_IP elastic-search.dev.tech to /etc/hosts
curl -k "https://elastic-search.dev.tech:32443/?pretty"
[Optional] Add Single Document
curl -k -H "Content-Type: application/json" \
-X POST "https://elastic-search.dev.tech:32443/books/_doc?pretty" \
-d '{"name": "Snow Crash", "author": "Neal Stephenson", "release_date": "1992-06-01", "page_count": 470}'
Install Kafka
Installation
Preliminary
1. Kubernetes has been installed, if not check 🔗link
2. Helm binary has been installed, if not check 🔗link
1.get helm repo
helm repo add bitnami https://charts.bitnami.com/bitnami
helm repo update
2.install chart
helm upgrade --create-namespace -n database kafka --install bitnami/kafka \
--set global.imageRegistry=m.daocloud.io/docker.io \
--set zookeeper.enabled=false \
--set controller.replicaCount=1 \
--set broker.replicaCount=1 \
--set persistence.enabled=false \
--version 28.0.3
kubectl -n database \
create secret generic client-properties \
--from-literal=client.properties="$(printf "security.protocol=SASL_PLAINTEXT\nsasl.mechanism=SCRAM-SHA-256\nsasl.jaas.config=org.apache.kafka.common.security.scram.ScramLoginModule required username=\"user1\" password=\"$(kubectl get secret kafka-user-passwords --namespace database -o jsonpath='{.data.client-passwords}' | base64 -d | cut -d , -f 1)\";\n")"
kubectl -n database apply -f - << EOF
apiVersion: apps/v1
kind: Deployment
metadata:
name: kafka-client-tools
labels:
app: kafka-client-tools
spec:
replicas: 1
selector:
matchLabels:
app: kafka-client-tools
template:
metadata:
labels:
app: kafka-client-tools
spec:
volumes:
- name: client-properties
secret:
secretName: client-properties
containers:
- name: kafka-client-tools
image: m.daocloud.io/docker.io/bitnami/kafka:3.6.2
volumeMounts:
- name: client-properties
mountPath: /bitnami/custom/client.properties
subPath: client.properties
readOnly: true
env:
- name: BOOTSTRAP_SERVER
value: kafka.database.svc.cluster.local:9092
- name: CLIENT_CONFIG_FILE
value: /bitnami/custom/client.properties
command:
- tail
- -f
- /etc/hosts
imagePullPolicy: IfNotPresent
EOF
3.validate function
- list topics
kubectl -n database exec -it deployment/kafka-client-tools -- bash -c \
'kafka-topics.sh --bootstrap-server $BOOTSTRAP_SERVER --command-config $CLIENT_CONFIG_FILE --list'
- create topic
kubectl -n database exec -it deployment/kafka-client-tools -- bash -c \
'kafka-topics.sh --bootstrap-server $BOOTSTRAP_SERVER --command-config $CLIENT_CONFIG_FILE --create --if-not-exists --topic test-topic'
- describe topic
kubectl -n database exec -it deployment/kafka-client-tools -- bash -c \
'kafka-topics.sh --bootstrap-server $BOOTSTRAP_SERVER --command-config $CLIENT_CONFIG_FILE --describe --topic test-topic'
- produce messages
kubectl -n database exec -it deployment/kafka-client-tools -- bash -c \
'for message in $(seq 0 10); do echo $message | kafka-console-producer.sh --bootstrap-server $BOOTSTRAP_SERVER --producer.config $CLIENT_CONFIG_FILE --topic test-topic; done'
- consume messages
kubectl -n database exec -it deployment/kafka-client-tools -- bash -c \
'kafka-console-consumer.sh --bootstrap-server $BOOTSTRAP_SERVER --consumer.config $CLIENT_CONFIG_FILE --topic test-topic --from-beginning'
Preliminary
1. Kubernetes has been installed, if not check 🔗link
2. ArgoCD has been installed, if not check 🔗link
3. Helm binary has been installed, if not check 🔗link
1.prepare `deploy-kafka.yaml`
KRaft mode:
kubectl -n argocd apply -f - << EOF
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: kafka
spec:
syncPolicy:
syncOptions:
- CreateNamespace=true
project: default
source:
repoURL: https://charts.bitnami.com/bitnami
chart: kafka
targetRevision: 28.0.3
helm:
releaseName: kafka
values: |
image:
registry: m.daocloud.io/docker.io
controller:
replicaCount: 1
persistence:
enabled: false
logPersistence:
enabled: false
extraConfig: |
message.max.bytes=5242880
default.replication.factor=1
offsets.topic.replication.factor=1
transaction.state.log.replication.factor=1
broker:
replicaCount: 1
persistence:
enabled: false
logPersistence:
enabled: false
extraConfig: |
message.max.bytes=5242880
default.replication.factor=1
offsets.topic.replication.factor=1
transaction.state.log.replication.factor=1
externalAccess:
enabled: false
autoDiscovery:
enabled: false
image:
registry: m.daocloud.io/docker.io
volumePermissions:
enabled: false
image:
registry: m.daocloud.io/docker.io
metrics:
kafka:
enabled: false
image:
registry: m.daocloud.io/docker.io
jmx:
enabled: false
image:
registry: m.daocloud.io/docker.io
provisioning:
enabled: false
kraft:
enabled: true
zookeeper:
enabled: false
destination:
server: https://kubernetes.default.svc
namespace: database
EOF
ZooKeeper mode:
kubectl -n argocd apply -f - << EOF
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: kafka
spec:
syncPolicy:
syncOptions:
- CreateNamespace=true
project: default
source:
repoURL: https://charts.bitnami.com/bitnami
chart: kafka
targetRevision: 28.0.3
helm:
releaseName: kafka
values: |
image:
registry: m.daocloud.io/docker.io
listeners:
client:
protocol: PLAINTEXT
interbroker:
protocol: PLAINTEXT
controller:
replicaCount: 0
persistence:
enabled: false
logPersistence:
enabled: false
extraConfig: |
message.max.bytes=5242880
default.replication.factor=1
offsets.topic.replication.factor=1
transaction.state.log.replication.factor=1
broker:
replicaCount: 1
minId: 0
persistence:
enabled: false
logPersistence:
enabled: false
extraConfig: |
message.max.bytes=5242880
default.replication.factor=1
offsets.topic.replication.factor=1
transaction.state.log.replication.factor=1
externalAccess:
enabled: false
autoDiscovery:
enabled: false
image:
registry: m.daocloud.io/docker.io
volumePermissions:
enabled: false
image:
registry: m.daocloud.io/docker.io
metrics:
kafka:
enabled: false
image:
registry: m.daocloud.io/docker.io
jmx:
enabled: false
image:
registry: m.daocloud.io/docker.io
provisioning:
enabled: false
kraft:
enabled: false
zookeeper:
enabled: true
image:
registry: m.daocloud.io/docker.io
replicaCount: 1
auth:
client:
enabled: false
quorum:
enabled: false
persistence:
enabled: false
volumePermissions:
enabled: false
image:
registry: m.daocloud.io/docker.io
metrics:
enabled: false
tls:
client:
enabled: false
quorum:
enabled: false
destination:
server: https://kubernetes.default.svc
namespace: database
EOF
2.sync by argocd
argocd app sync argocd/kafka
3.set up client tool
for the KRaft (SASL) deployment:
kubectl -n database \
create secret generic client-properties \
--from-literal=client.properties="$(printf "security.protocol=SASL_PLAINTEXT\nsasl.mechanism=SCRAM-SHA-256\nsasl.jaas.config=org.apache.kafka.common.security.scram.ScramLoginModule required username=\"user1\" password=\"$(kubectl get secret kafka-user-passwords --namespace database -o jsonpath='{.data.client-passwords}' | base64 -d | cut -d , -f 1)\";\n")"
for the ZooKeeper (PLAINTEXT) deployment:
kubectl -n database \
create secret generic client-properties \
--from-literal=client.properties="security.protocol=PLAINTEXT"
4.prepare `kafka-client-tools.yaml`
kubectl -n database apply -f - << EOF
apiVersion: apps/v1
kind: Deployment
metadata:
name: kafka-client-tools
labels:
app: kafka-client-tools
spec:
replicas: 1
selector:
matchLabels:
app: kafka-client-tools
template:
metadata:
labels:
app: kafka-client-tools
spec:
volumes:
- name: client-properties
secret:
secretName: client-properties
containers:
- name: kafka-client-tools
image: m.daocloud.io/docker.io/bitnami/kafka:3.6.2
volumeMounts:
- name: client-properties
mountPath: /bitnami/custom/client.properties
subPath: client.properties
readOnly: true
env:
- name: BOOTSTRAP_SERVER
value: kafka.database.svc.cluster.local:9092
- name: CLIENT_CONFIG_FILE
value: /bitnami/custom/client.properties
- name: ZOOKEEPER_CONNECT
value: kafka-zookeeper.database.svc.cluster.local:2181
command:
- tail
- -f
- /etc/hosts
imagePullPolicy: IfNotPresent
EOF
5.validate function
- list topics
kubectl -n database exec -it deployment/kafka-client-tools -- bash -c \
'kafka-topics.sh --bootstrap-server $BOOTSTRAP_SERVER --command-config $CLIENT_CONFIG_FILE --list'
- create topic
kubectl -n database exec -it deployment/kafka-client-tools -- bash -c \
'kafka-topics.sh --bootstrap-server $BOOTSTRAP_SERVER --command-config $CLIENT_CONFIG_FILE --create --if-not-exists --topic test-topic'
- describe topic
kubectl -n database exec -it deployment/kafka-client-tools -- bash -c \
'kafka-topics.sh --bootstrap-server $BOOTSTRAP_SERVER --command-config $CLIENT_CONFIG_FILE --describe --topic test-topic'
- produce messages
kubectl -n database exec -it deployment/kafka-client-tools -- bash -c \
'for message in $(seq 0 10); do echo $message | kafka-console-producer.sh --bootstrap-server $BOOTSTRAP_SERVER --producer.config $CLIENT_CONFIG_FILE --topic test-topic; done'
- consume messages
kubectl -n database exec -it deployment/kafka-client-tools -- bash -c \
'kafka-console-consumer.sh --bootstrap-server $BOOTSTRAP_SERVER --consumer.config $CLIENT_CONFIG_FILE --topic test-topic --from-beginning'
Preliminary
1. Docker has been installed, if not check 🔗link
you can use an additional DaoCloud mirror image to accelerate your pulling, check Daocloud Proxy
1.init server
1.init server
mkdir -p kafka/data
chmod -R 777 kafka/data
podman run --rm \
--name kafka-server \
--hostname kafka-server \
-p 9092:9092 \
-p 9094:9094 \
-v $(pwd)/kafka/data:/bitnami/kafka/data \
-e KAFKA_CFG_NODE_ID=0 \
-e KAFKA_CFG_PROCESS_ROLES=controller,broker \
-e KAFKA_CFG_CONTROLLER_QUORUM_VOTERS=0@kafka-server:9093 \
-e KAFKA_CFG_LISTENERS=PLAINTEXT://:9092,CONTROLLER://:9093,EXTERNAL://:9094 \
-e KAFKA_CFG_ADVERTISED_LISTENERS=PLAINTEXT://kafka:9092,EXTERNAL://host.containers.internal:9094 \
-e KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP=CONTROLLER:PLAINTEXT,EXTERNAL:PLAINTEXT,PLAINTEXT:PLAINTEXT \
-e KAFKA_CFG_CONTROLLER_LISTENER_NAMES=CONTROLLER \
-d m.daocloud.io/docker.io/bitnami/kafka:3.6.2
2.list topic
BOOTSTRAP_SERVER=host.containers.internal:9094
podman run --rm \
-it m.daocloud.io/docker.io/bitnami/kafka:3.6.2 kafka-topics.sh \
--bootstrap-server $BOOTSTRAP_SERVER --list
3.create topic
BOOTSTRAP_SERVER=host.containers.internal:9094
# BOOTSTRAP_SERVER=10.200.60.64:9094
TOPIC=test-topic
podman run --rm \
-it m.daocloud.io/docker.io/bitnami/kafka:3.6.2 kafka-topics.sh \
--bootstrap-server $BOOTSTRAP_SERVER \
--create \
--if-not-exists \
--topic $TOPIC
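4.produce record
An interactive producer using the same image, so the consumer below has something to read (type messages, then Ctrl-C to exit):
BOOTSTRAP_SERVER=host.containers.internal:9094
TOPIC=test-topic
podman run --rm \
-it m.daocloud.io/docker.io/bitnami/kafka:3.6.2 kafka-console-producer.sh \
--bootstrap-server $BOOTSTRAP_SERVER \
--topic $TOPIC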
5.consume record
BOOTSTRAP_SERVER=host.containers.internal:9094
# BOOTSTRAP_SERVER=10.200.60.64:9094
TOPIC=test-topic
podman run --rm \
-it m.daocloud.io/docker.io/bitnami/kafka:3.6.2 kafka-console-consumer.sh \
--bootstrap-server $BOOTSTRAP_SERVER \
--topic $TOPIC \
--from-beginning
FAQ
Install MariaDB
Installation
Preliminary
1. Kubernetes has been installed, if not check 🔗link
2. argoCD has been installed, if not check 🔗link
3. cert-manager has been installed on ArgoCD and a ClusterIssuer named `self-signed-ca-issuer` exists, if not check 🔗link
1.prepare mariadb credentials secret
kubectl get namespaces database > /dev/null 2>&1 || kubectl create namespace database
kubectl -n database create secret generic mariadb-credentials \
--from-literal=mariadb-root-password=$(tr -dc A-Za-z0-9 </dev/urandom | head -c 16) \
--from-literal=mariadb-replication-password=$(tr -dc A-Za-z0-9 </dev/urandom | head -c 16) \
--from-literal=mariadb-password=$(tr -dc A-Za-z0-9 </dev/urandom | head -c 16)
2.prepare `deploy-mariadb.yaml`
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: mariadb
spec:
syncPolicy:
syncOptions:
- CreateNamespace=true
project: default
source:
repoURL: https://charts.bitnami.com/bitnami
chart: mariadb
targetRevision: 16.3.2
helm:
releaseName: mariadb
values: |
architecture: standalone
auth:
database: test-mariadb
username: aaron.yang
existingSecret: mariadb-credentials
primary:
extraFlags: "--character-set-server=utf8mb4 --collation-server=utf8mb4_bin"
persistence:
enabled: false
secondary:
replicaCount: 1
persistence:
enabled: false
image:
registry: m.daocloud.io/docker.io
pullPolicy: IfNotPresent
volumePermissions:
enabled: false
image:
registry: m.daocloud.io/docker.io
pullPolicy: IfNotPresent
metrics:
enabled: false
image:
registry: m.daocloud.io/docker.io
pullPolicy: IfNotPresent
destination:
server: https://kubernetes.default.svc
namespace: database
3.deploy mariadb
kubectl -n argocd apply -f deploy-mariadb.yaml
4.sync by argocd
argocd app sync argocd/mariadb
5.check mariadb
kubectl -n database get secret mariadb-credentials -o jsonpath='{.data.mariadb-root-password}' | base64 -d
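6.connect to mariadb (optional)
A quick connectivity check using the client bundled in the same pod (assumes the chart's default statefulset name mariadb):
MARIADB_ROOT_PASSWORD=$(kubectl -n database get secret mariadb-credentials -o jsonpath='{.data.mariadb-root-password}' | base64 -d)
kubectl -n database exec -it statefulset/mariadb -- env MYSQL_PWD="$MARIADB_ROOT_PASSWORD" mysql -uroot -e 'SHOW DATABASES;'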
Preliminary
1. Kubernetes has been installed, if not check 🔗link
2. ArgoCD has been installed, if not check 🔗link
3. Argo Workflow has been installed, if not check 🔗link
1.prepare `argocd-login-credentials`
kubectl get namespaces business-workflows > /dev/null 2>&1 || kubectl create namespace business-workflows
kubectl -n business-workflows create secret generic argocd-login-credentials \
--from-literal=username=admin \
--from-literal=password=$(kubectl -n argocd get secret argocd-initial-admin-secret -o jsonpath='{.data.password}' | base64 -d)
2.apply rolebinding to k8s
kubectl -n argocd apply -f - <<EOF
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: application-administrator
rules:
- apiGroups:
- argoproj.io
resources:
- applications
verbs:
- '*'
- apiGroups:
- apps
resources:
- deployments
verbs:
- '*'
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: application-administration
namespace: argocd
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: application-administrator
subjects:
- kind: ServiceAccount
name: argo-workflow
namespace: business-workflows
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: application-administration
namespace: application
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: application-administrator
subjects:
- kind: ServiceAccount
name: argo-workflow
namespace: business-workflows
EOF
3.prepare mariadb credentials secret
kubectl -n application create secret generic mariadb-credentials \
--from-literal=mariadb-root-password=$(tr -dc A-Za-z0-9 </dev/urandom | head -c 16) \
--from-literal=mariadb-replication-password=$(tr -dc A-Za-z0-9 </dev/urandom | head -c 16) \
--from-literal=mariadb-password=$(tr -dc A-Za-z0-9 </dev/urandom | head -c 16)
4.prepare `deploy-mariadb-flow.yaml`
apiVersion: argoproj.io/v1alpha1
kind: Workflow
metadata:
generateName: deploy-argocd-app-mariadb-
spec:
entrypoint: entry
artifactRepositoryRef:
configmap: artifact-repositories
key: default-artifact-repository
serviceAccountName: argo-workflow
templates:
- name: entry
inputs:
parameters:
- name: argocd-server
value: argo-cd-argocd-server.argocd:443
- name: insecure-option
value: --insecure
dag:
tasks:
- name: apply
template: apply
- name: prepare-argocd-binary
template: prepare-argocd-binary
dependencies:
- apply
- name: sync
dependencies:
- prepare-argocd-binary
template: sync
arguments:
artifacts:
- name: argocd-binary
from: "{{tasks.prepare-argocd-binary.outputs.artifacts.argocd-binary}}"
parameters:
- name: argocd-server
value: "{{inputs.parameters.argocd-server}}"
- name: insecure-option
value: "{{inputs.parameters.insecure-option}}"
- name: wait
dependencies:
- sync
template: wait
arguments:
artifacts:
- name: argocd-binary
from: "{{tasks.prepare-argocd-binary.outputs.artifacts.argocd-binary}}"
parameters:
- name: argocd-server
value: "{{inputs.parameters.argocd-server}}"
- name: insecure-option
value: "{{inputs.parameters.insecure-option}}"
- name: init-db-tool
template: init-db-tool
dependencies:
- wait
- name: apply
resource:
action: apply
manifest: |
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: app-mariadb
namespace: argocd
spec:
syncPolicy:
syncOptions:
- CreateNamespace=true
project: default
source:
repoURL: https://charts.bitnami.com/bitnami
chart: mariadb
targetRevision: 16.5.0
helm:
releaseName: app-mariadb
values: |
architecture: standalone
auth:
database: geekcity
username: aaron.yang
existingSecret: mariadb-credentials
primary:
persistence:
enabled: false
secondary:
replicaCount: 1
persistence:
enabled: false
image:
registry: m.daocloud.io/docker.io
pullPolicy: IfNotPresent
volumePermissions:
enabled: false
image:
registry: m.daocloud.io/docker.io
pullPolicy: IfNotPresent
metrics:
enabled: false
image:
registry: m.daocloud.io/docker.io
pullPolicy: IfNotPresent
destination:
server: https://kubernetes.default.svc
namespace: application
- name: prepare-argocd-binary
inputs:
artifacts:
- name: argocd-binary
path: /tmp/argocd
mode: 755
http:
url: https://files.m.daocloud.io/github.com/argoproj/argo-cd/releases/download/v2.9.3/argocd-linux-amd64
outputs:
artifacts:
- name: argocd-binary
path: "{{inputs.artifacts.argocd-binary.path}}"
container:
image: m.daocloud.io/docker.io/library/fedora:39
command:
- sh
- -c
args:
- |
ls -l {{inputs.artifacts.argocd-binary.path}}
- name: sync
inputs:
artifacts:
- name: argocd-binary
path: /usr/local/bin/argocd
parameters:
- name: argocd-server
- name: insecure-option
value: ""
container:
image: m.daocloud.io/docker.io/library/fedora:39
env:
- name: ARGOCD_USERNAME
valueFrom:
secretKeyRef:
name: argocd-login-credentials
key: username
- name: ARGOCD_PASSWORD
valueFrom:
secretKeyRef:
name: argocd-login-credentials
key: password
- name: WITH_PRUNE_OPTION
value: --prune
command:
- sh
- -c
args:
- |
set -e
export ARGOCD_SERVER={{inputs.parameters.argocd-server}}
export INSECURE_OPTION={{inputs.parameters.insecure-option}}
export ARGOCD_USERNAME=${ARGOCD_USERNAME:-admin}
argocd login ${INSECURE_OPTION} --username ${ARGOCD_USERNAME} --password ${ARGOCD_PASSWORD} ${ARGOCD_SERVER}
argocd app sync argocd/app-mariadb ${WITH_PRUNE_OPTION} --timeout 300
- name: wait
inputs:
artifacts:
- name: argocd-binary
path: /usr/local/bin/argocd
parameters:
- name: argocd-server
- name: insecure-option
value: ""
container:
image: m.daocloud.io/docker.io/library/fedora:39
env:
- name: ARGOCD_USERNAME
valueFrom:
secretKeyRef:
name: argocd-login-credentials
key: username
- name: ARGOCD_PASSWORD
valueFrom:
secretKeyRef:
name: argocd-login-credentials
key: password
command:
- sh
- -c
args:
- |
set -e
export ARGOCD_SERVER={{inputs.parameters.argocd-server}}
export INSECURE_OPTION={{inputs.parameters.insecure-option}}
export ARGOCD_USERNAME=${ARGOCD_USERNAME:-admin}
argocd login ${INSECURE_OPTION} --username ${ARGOCD_USERNAME} --password ${ARGOCD_PASSWORD} ${ARGOCD_SERVER}
argocd app wait argocd/app-mariadb
- name: init-db-tool
resource:
action: apply
manifest: |
apiVersion: apps/v1
kind: Deployment
metadata:
name: app-mariadb-tool
namespace: application
labels:
app.kubernetes.io/name: mariadb-tool
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: mariadb-tool
template:
metadata:
labels:
app.kubernetes.io/name: mariadb-tool
spec:
containers:
- name: mariadb-tool
image: m.daocloud.io/docker.io/bitnami/mariadb:10.5.12-debian-10-r0
imagePullPolicy: IfNotPresent
env:
- name: MARIADB_ROOT_PASSWORD
valueFrom:
secretKeyRef:
key: mariadb-root-password
name: mariadb-credentials
- name: TZ
value: Asia/Shanghai
5.submit to argo workflow client
argo -n business-workflows submit deploy-mariadb-flow.yaml
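After submitting, you can follow the run with the same CLI; `@latest` resolves to the most recently submitted workflow:
# list workflows in the namespace, then tail the logs of the newest one
argo -n business-workflows list
argo -n business-workflows logs @latest --follow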
6.decode password
kubectl -n application get secret mariadb-credentials -o jsonpath='{.data.mariadb-root-password}' | base64 -d
Preliminary
1. Docker has been installed, if not check 🔗link
you can run an additional daocloud image to accelerate your pulling, check Daocloud Proxy
1.init server
mkdir -p mariadb/data
podman run \
-p 3306:3306 \
-e MARIADB_ROOT_PASSWORD=mysql \
-d m.daocloud.io/docker.io/library/mariadb:11.2.2-jammy \
--log-bin \
--binlog-format=ROW
2.use web console
And then you can visit 🔗http://localhost:8080
username: `root`
password: `mysql`
podman run --rm -p 8080:80 \
-e PMA_ARBITRARY=1 \
-d m.daocloud.io/docker.io/library/phpmyadmin:5.1.1-apache
3.use internal client
podman run --rm \
-e MYSQL_PWD=mysql \
-it m.daocloud.io/docker.io/library/mariadb:11.2.2-jammy \
mariadb \
--host host.containers.internal \
--port 3306 \
--user root \
--database mysql \
--execute 'select version()'
Useful SQL
- list all bin logs
SHOW BINARY LOGS;
- delete previous bin logs
PURGE BINARY LOGS TO 'mysqld-bin.0000003'; # delete mysqld-bin.0000001 and mysqld-bin.0000002
PURGE BINARY LOGS BEFORE 'yyyy-MM-dd HH:mm:ss';
PURGE BINARY LOGS BEFORE DATE_SUB(NOW(), INTERVAL 3 DAY); # delete binary logs older than three days
If you are using master-slave mode, you can replace BINARY with MASTER.
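Rather than purging by hand, newer servers can expire binary logs automatically; a sketch reusing the internal client from above, assuming MariaDB 10.6+ where `binlog_expire_logs_seconds` is available:
# 259200 seconds = 3 days; the variable name assumes MariaDB 10.6+
podman run --rm \
  -e MYSQL_PWD=mysql \
  -it m.daocloud.io/docker.io/library/mariadb:11.2.2-jammy \
  mariadb \
  --host host.containers.internal \
  --port 3306 \
  --user root \
  --execute "SET GLOBAL binlog_expire_logs_seconds = 259200; SHOW VARIABLES LIKE 'binlog_expire%';"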
FAQ
Install Milvus
Preliminary
- Kubernetes has been installed, if not check link
- argoCD has been installed, if not check link
- cert-manager has been installed on argocd and there is a ClusterIssuer named `self-signed-ca-issuer`, if not check link
- minio has been installed, if not check link
Steps
1. copy minio credentials secret
kubectl get namespaces database > /dev/null 2>&1 || kubectl create namespace database
kubectl -n storage get secret minio-secret -o json \
| jq 'del(.metadata["namespace","creationTimestamp","resourceVersion","selfLink","uid"])' \
| kubectl -n database apply -f -
2. prepare deploy-milvus.yaml
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: milvus
spec:
syncPolicy:
syncOptions:
- CreateNamespace=true
project: default
source:
repoURL: registry-1.docker.io/bitnamicharts
chart: milvus
targetRevision: 11.2.4
helm:
releaseName: milvus
values: |
global:
security:
allowInsecureImages: true
milvus:
image:
registry: m.lab.zverse.space/docker.io
repository: bitnami/milvus
tag: 2.5.7-debian-12-r0
pullPolicy: IfNotPresent
auth:
enabled: false
initJob:
forceRun: false
image:
registry: m.lab.zverse.space/docker.io
repository: bitnami/pymilvus
tag: 2.5.6-debian-12-r0
pullPolicy: IfNotPresent
resources:
requests:
cpu: 2
memory: 512Mi
limits:
cpu: 2
memory: 2Gi
dataCoord:
replicaCount: 1
resources:
requests:
cpu: 500m
memory: 512Mi
limits:
cpu: 2
memory: 2Gi
metrics:
enabled: true
rootCoord:
replicaCount: 1
resources:
requests:
cpu: 500m
memory: 1Gi
limits:
cpu: 2
memory: 4Gi
queryCoord:
replicaCount: 1
resources:
requests:
cpu: 500m
memory: 1Gi
limits:
cpu: 2
memory: 4Gi
indexCoord:
replicaCount: 1
resources:
requests:
cpu: 500m
memory: 1Gi
limits:
cpu: 2
memory: 4Gi
dataNode:
replicaCount: 1
resources:
requests:
cpu: 500m
memory: 1Gi
limits:
cpu: 2
memory: 4Gi
queryNode:
replicaCount: 1
resources:
requests:
cpu: 500m
memory: 1Gi
limits:
cpu: 2
memory: 2Gi
indexNode:
resources:
requests:
cpu: 500m
memory: 1Gi
limits:
cpu: 2
memory: 2Gi
proxy:
replicaCount: 1
service:
type: ClusterIP
resources:
requests:
cpu: 500m
memory: 1Gi
limits:
cpu: 2
memory: 2Gi
attu:
image:
registry: m.lab.zverse.space/docker.io
repository: bitnami/attu
tag: 2.5.5-debian-12-r1
resources:
requests:
cpu: 500m
memory: 1Gi
limits:
cpu: 2
memory: 4Gi
service:
type: ClusterIP
ingress:
enabled: true
ingressClassName: "nginx"
annotations:
cert-manager.io/cluster-issuer: alidns-webhook-zverse-letsencrypt
hostname: milvus.dev.tech
path: /
pathType: ImplementationSpecific
tls: true
waitContainer:
image:
registry: m.lab.zverse.space/docker.io
repository: bitnami/os-shell
tag: 12-debian-12-r40
pullPolicy: IfNotPresent
resources:
requests:
cpu: 500m
memory: 1Gi
limits:
cpu: 2
memory: 4Gi
externalS3:
host: "minio.storage"
port: 9000
existingSecret: "minio-secret"
existingSecretAccessKeyIDKey: "root-user"
existingSecretKeySecretKey: "root-password"
bucket: "milvus"
rootPath: "file"
etcd:
enabled: true
image:
registry: m.lab.zverse.space/docker.io
replicaCount: 1
auth:
rbac:
create: false
client:
secureTransport: false
resources:
requests:
cpu: 500m
memory: 1Gi
limits:
cpu: 2
memory: 2Gi
persistence:
enabled: true
storageClass: ""
size: 2Gi
preUpgradeJob:
enabled: false
minio:
enabled: false
kafka:
enabled: true
image:
registry: m.lab.zverse.space/docker.io
controller:
replicaCount: 1
livenessProbe:
failureThreshold: 8
resources:
requests:
cpu: 500m
memory: 1Gi
limits:
cpu: 2
memory: 2Gi
persistence:
enabled: true
storageClass: ""
size: 2Gi
service:
ports:
client: 9092
extraConfig: |-
offsets.topic.replication.factor=3
listeners:
client:
protocol: PLAINTEXT
interbroker:
protocol: PLAINTEXT
external:
protocol: PLAINTEXT
sasl:
enabledMechanisms: "PLAIN"
client:
users:
- user
broker:
replicaCount: 0
destination:
server: https://kubernetes.default.svc
namespace: database
3. apply to k8s
kubectl -n argocd apply -f deploy-milvus.yaml
4. sync by argocd
argocd app sync argocd/milvus
5. check Attu WebUI
milvus address: milvus-proxy:19530
milvus database: default
https://milvus.dev.tech:32443/#/
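Besides the WebUI, you can probe the proxy health endpoint directly; a sketch, assuming the proxy deployment is named `milvus-proxy` and exposes the standard Milvus HTTP port 9091:
# deployment name and port are assumptions based on the chart defaults
kubectl -n database port-forward deploy/milvus-proxy 9091:9091 &
curl http://localhost:9091/healthz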
6. [Optional] import data
import data by using sql file
MARIADB_ROOT_PASSWORD=$(kubectl -n database get secret mariadb-credentials -o jsonpath='{.data.mariadb-root-password}' | base64 -d)
POD_NAME=$(kubectl get pod -n database -l "app.kubernetes.io/name=mariadb-tool" -o jsonpath="{.items[0].metadata.name}") \
&& export SQL_FILENAME="Dump20240301.sql" \
&& kubectl -n database cp ${SQL_FILENAME} ${POD_NAME}:/tmp/${SQL_FILENAME} \
&& kubectl -n database exec -it deployment/app-mariadb-tool -- bash -c \
'echo "create database ccds;" | mysql -h mariadb.database -uroot -p$MARIADB_ROOT_PASSWORD' \
&& kubectl -n database exec -it ${POD_NAME} -- bash -c \
"mysql -h mariadb.database -uroot -p\${MARIADB_ROOT_PASSWORD} \
ccds < /tmp/Dump20240301.sql"
7. [Optional] decode password
kubectl -n database get secret mariadb-credentials -o jsonpath='{.data.mariadb-root-password}' | base64 -d
8. [Optional] execute sql in pod
kubectl -n database exec -it xxxx -- bash
mariadb -h 127.0.0.1 -u root -p$MARIADB_ROOT_PASSWORD
And then you can check connection by
show status like 'Threads%';
Install Neo4j
Installation
Preliminary
1. Kubernetes has been installed, if not check 🔗link
2. Helm has been installed, if not check 🔗link
1.get helm repo
helm repo add ay-helm-mirror https://aaronyang0628.github.io/helm-chart-mirror/charts
helm repo update
2.install chart
helm install ay-helm-mirror/kube-prometheus-stack --generate-name
for more information, you can check 🔗https://artifacthub.io/packages/helm/prometheus-community/prometheus
Preliminary
1. Kubernetes has been installed, if not check 🔗link
2. Helm has been installed, if not check 🔗link
3. ArgoCD has been installed, if not check 🔗link
1.prepare `deploy-xxxxx.yaml`
2.apply to k8s
kubectl -n argocd apply -f xxxx.yaml
3.sync by argocd
argocd app sync argocd/xxxx
4.prepare yaml-content.yaml
5.apply to k8s
kubectl apply -f xxxx.yaml
6.apply xxxx.yaml directly
kubectl apply -f - <<EOF
EOF
Preliminary
1. Docker|Podman|Buildah has been installed, if not check 🔗link
you can run an additional daocloud image to accelerate your pulling, check Daocloud Proxy
1.init server
mkdir -p neo4j/data
podman run --rm \
--name neo4j \
-p 7474:7474 \
-p 7687:7687 \
-e NEO4J_AUTH=neo4j/neo4jadmin \
-v $(pwd)/neo4j/data:/data \
-d docker.io/library/neo4j:5.18.0-community-bullseye
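Once the container is up, the bundled cypher-shell client gives a quick smoke test; this assumes the NEO4J_AUTH credentials set above:
# credentials match the NEO4J_AUTH value passed to the server container
podman exec -it neo4j \
  cypher-shell -u neo4j -p neo4jadmin \
  "RETURN 'hello neo4j' AS greeting;"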
Preliminary
1. Kubernetes has been installed, if not check 🔗link
2. Helm has been installed, if not check 🔗link
3. ArgoCD has been installed, if not check 🔗link
4. Argo Workflow has been installed, if not check 🔗link
1.prepare `argocd-login-credentials`
kubectl get namespaces database > /dev/null 2>&1 || kubectl create namespace database
2.apply rolebinding to k8s
kubectl apply -f - <<EOF
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: application-administrator
rules:
- apiGroups:
- argoproj.io
resources:
- applications
verbs:
- '*'
- apiGroups:
- apps
resources:
- deployments
verbs:
- '*'
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: application-administration
namespace: argocd
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: application-administrator
subjects:
- kind: ServiceAccount
name: argo-workflow
namespace: business-workflows
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: application-administration
namespace: application
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: application-administrator
subjects:
- kind: ServiceAccount
name: argo-workflow
namespace: business-workflows
EOF
4.prepare `deploy-xxxx-flow.yaml`
6.submit to argo workflow client
argo -n business-workflows submit deploy-xxxx-flow.yaml
7.decode password
kubectl -n application get secret xxxx-credentials -o jsonpath='{.data.xxx-password}' | base64 -d
FAQ
Install Postgresql
Installation
Preliminary
1. Kubernetes has been installed, if not check 🔗link
2. Helm has been installed, if not check 🔗link
1.get helm repo
helm repo add ay-helm-mirror https://aaronyang0628.github.io/helm-chart-mirror/charts
helm repo update
2.install chart
helm install ay-helm-mirror/kube-prometheus-stack --generate-name
for more information, you can check 🔗https://artifacthub.io/packages/helm/prometheus-community/prometheus
Preliminary
1. Kubernetes has been installed, if not check 🔗link
2. Helm has been installed, if not check 🔗link
3. ArgoCD has been installed, if not check 🔗link
1.prepare `deploy-xxxxx.yaml`
2.apply to k8s
kubectl -n argocd apply -f xxxx.yaml
3.sync by argocd
argocd app sync argocd/xxxx
4.prepare yaml-content.yaml
5.apply to k8s
kubectl apply -f xxxx.yaml
6.apply xxxx.yaml directly
kubectl apply -f - <<EOF
EOF
Preliminary
1. Docker|Podman|Buildah has been installed, if not check 🔗link
you can run an additional daocloud image to accelerate your pulling, check Daocloud Proxy
1.init server
mkdir -p $(pwd)/postgresql/data
podman run --rm \
--name postgresql \
-p 5432:5432 \
-e POSTGRES_PASSWORD=postgresql \
-e PGDATA=/var/lib/postgresql/data/pgdata \
-v $(pwd)/postgresql/data:/var/lib/postgresql/data \
-d docker.io/library/postgres:15.2-alpine3.17
2.use web console
podman run --rm \
-p 8080:80 \
-e 'PGADMIN_DEFAULT_EMAIL=ben.wangz@foxmail.com' \
-e 'PGADMIN_DEFAULT_PASSWORD=123456' \
-d docker.io/dpage/pgadmin4:6.15
3.use internal client
podman run --rm \
--env PGPASSWORD=postgresql \
--entrypoint psql \
-it docker.io/library/postgres:15.2-alpine3.17 \
--host host.containers.internal \
--port 5432 \
--username postgres \
--dbname postgres \
--command 'select version()'
Preliminary
1. Kubernetes has been installed, if not check 🔗link
2. Helm has been installed, if not check 🔗link
3. ArgoCD has been installed, if not check 🔗link
4. Argo Workflow has been installed, if not check 🔗link
5. Minio artifact repository has been configured (endpoint: minio.storage:9000), if not check 🔗link
1.prepare `argocd-login-credentials`
kubectl get namespaces database > /dev/null 2>&1 || kubectl create namespace database
ARGOCD_USERNAME=admin
ARGOCD_PASSWORD=$(kubectl -n argocd get secret argocd-initial-admin-secret -o jsonpath="{.data.password}" | base64 -d)
kubectl -n business-workflows create secret generic argocd-login-credentials \
--from-literal=username=${ARGOCD_USERNAME} \
--from-literal=password=${ARGOCD_PASSWORD}
2.apply rolebinding to k8s
kubectl apply -f - <<EOF
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: application-administrator
rules:
- apiGroups:
- argoproj.io
resources:
- applications
verbs:
- '*'
- apiGroups:
- apps
resources:
- deployments
verbs:
- '*'
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: application-administration
namespace: argocd
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: application-administrator
subjects:
- kind: ServiceAccount
name: argo-workflow
namespace: business-workflows
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: application-administration
namespace: application
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: application-administrator
subjects:
- kind: ServiceAccount
name: argo-workflow
namespace: business-workflows
EOF
3.prepare postgresql admin credentials secret
kubectl -n application create secret generic postgresql-credentials \
--from-literal=postgres-password=$(tr -dc A-Za-z0-9 </dev/urandom | head -c 16) \
--from-literal=password=$(tr -dc A-Za-z0-9 </dev/urandom | head -c 16) \
--from-literal=replication-password=$(tr -dc A-Za-z0-9 </dev/urandom | head -c 16)
4.prepare `deploy-postgresql-flow.yaml`
apiVersion: argoproj.io/v1alpha1
kind: Workflow
metadata:
generateName: deploy-argocd-app-pg-
spec:
entrypoint: entry
artifactRepositoryRef:
configmap: artifact-repositories
key: default-artifact-repository
serviceAccountName: argo-workflow
templates:
- name: entry
inputs:
parameters:
- name: argocd-server
value: argo-cd-argocd-server.argocd:443
- name: insecure-option
value: --insecure
dag:
tasks:
- name: apply
template: apply
- name: prepare-argocd-binary
template: prepare-argocd-binary
dependencies:
- apply
- name: sync
dependencies:
- prepare-argocd-binary
template: sync
arguments:
artifacts:
- name: argocd-binary
from: "{{tasks.prepare-argocd-binary.outputs.artifacts.argocd-binary}}"
parameters:
- name: argocd-server
value: "{{inputs.parameters.argocd-server}}"
- name: insecure-option
value: "{{inputs.parameters.insecure-option}}"
- name: wait
dependencies:
- sync
template: wait
arguments:
artifacts:
- name: argocd-binary
from: "{{tasks.prepare-argocd-binary.outputs.artifacts.argocd-binary}}"
parameters:
- name: argocd-server
value: "{{inputs.parameters.argocd-server}}"
- name: insecure-option
value: "{{inputs.parameters.insecure-option}}"
- name: init-db-tool
template: init-db-tool
dependencies:
- wait
- name: apply
resource:
action: apply
manifest: |
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: app-postgresql
namespace: argocd
spec:
syncPolicy:
syncOptions:
- CreateNamespace=true
project: default
source:
repoURL: https://charts.bitnami.com/bitnami
chart: postgresql
targetRevision: 14.2.2
helm:
releaseName: app-postgresql
values: |
architecture: standalone
auth:
database: geekcity
username: aaron.yang
existingSecret: postgresql-credentials
primary:
persistence:
enabled: false
readReplicas:
replicaCount: 1
persistence:
enabled: false
backup:
enabled: false
image:
registry: m.daocloud.io/docker.io
pullPolicy: IfNotPresent
volumePermissions:
enabled: false
image:
registry: m.daocloud.io/docker.io
pullPolicy: IfNotPresent
metrics:
enabled: false
image:
registry: m.daocloud.io/docker.io
pullPolicy: IfNotPresent
destination:
server: https://kubernetes.default.svc
namespace: application
- name: prepare-argocd-binary
inputs:
artifacts:
- name: argocd-binary
path: /tmp/argocd
mode: 755
http:
url: https://files.m.daocloud.io/github.com/argoproj/argo-cd/releases/download/v2.9.3/argocd-linux-amd64
outputs:
artifacts:
- name: argocd-binary
path: "{{inputs.artifacts.argocd-binary.path}}"
container:
image: m.daocloud.io/docker.io/library/fedora:39
command:
- sh
- -c
args:
- |
ls -l {{inputs.artifacts.argocd-binary.path}}
- name: sync
inputs:
artifacts:
- name: argocd-binary
path: /usr/local/bin/argocd
parameters:
- name: argocd-server
- name: insecure-option
value: ""
container:
image: m.daocloud.io/docker.io/library/fedora:39
env:
- name: ARGOCD_USERNAME
valueFrom:
secretKeyRef:
name: argocd-login-credentials
key: username
- name: ARGOCD_PASSWORD
valueFrom:
secretKeyRef:
name: argocd-login-credentials
key: password
- name: WITH_PRUNE_OPTION
value: --prune
command:
- sh
- -c
args:
- |
set -e
export ARGOCD_SERVER={{inputs.parameters.argocd-server}}
export INSECURE_OPTION={{inputs.parameters.insecure-option}}
export ARGOCD_USERNAME=${ARGOCD_USERNAME:-admin}
argocd login ${INSECURE_OPTION} --username ${ARGOCD_USERNAME} --password ${ARGOCD_PASSWORD} ${ARGOCD_SERVER}
argocd app sync argocd/app-postgresql ${WITH_PRUNE_OPTION} --timeout 300
- name: wait
inputs:
artifacts:
- name: argocd-binary
path: /usr/local/bin/argocd
parameters:
- name: argocd-server
- name: insecure-option
value: ""
container:
image: m.daocloud.io/docker.io/library/fedora:39
env:
- name: ARGOCD_USERNAME
valueFrom:
secretKeyRef:
name: argocd-login-credentials
key: username
- name: ARGOCD_PASSWORD
valueFrom:
secretKeyRef:
name: argocd-login-credentials
key: password
command:
- sh
- -c
args:
- |
set -e
export ARGOCD_SERVER={{inputs.parameters.argocd-server}}
export INSECURE_OPTION={{inputs.parameters.insecure-option}}
export ARGOCD_USERNAME=${ARGOCD_USERNAME:-admin}
argocd login ${INSECURE_OPTION} --username ${ARGOCD_USERNAME} --password ${ARGOCD_PASSWORD} ${ARGOCD_SERVER}
argocd app wait argocd/app-postgresql
- name: init-db-tool
resource:
action: apply
manifest: |
apiVersion: apps/v1
kind: Deployment
metadata:
name: app-postgresql-tool
namespace: application
labels:
app.kubernetes.io/name: postgresql-tool
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: postgresql-tool
template:
metadata:
labels:
app.kubernetes.io/name: postgresql-tool
spec:
containers:
- name: postgresql-tool
image: m.daocloud.io/docker.io/bitnami/postgresql:14.4.0-debian-11-r9
imagePullPolicy: IfNotPresent
env:
- name: POSTGRES_PASSWORD
valueFrom:
secretKeyRef:
key: postgres-password
name: postgresql-credentials
- name: TZ
value: Asia/Shanghai
command:
- tail
args:
- -f
- /etc/hosts
6.submit to argo workflow client
argo -n business-workflows submit deploy-postgresql-flow.yaml
7.decode password
kubectl -n application get secret postgresql-credentials -o jsonpath='{.data.postgres-password}' | base64 -d
8.import data
POSTGRES_PASSWORD=$(kubectl -n application get secret postgresql-credentials -o jsonpath='{.data.postgres-password}' | base64 -d) \
POD_NAME=$(kubectl get pod -n application -l "app.kubernetes.io/name=postgresql-tool" -o jsonpath="{.items[0].metadata.name}") \
&& export SQL_FILENAME="init_dfs_table_data.sql" \
&& kubectl -n application cp ${SQL_FILENAME} ${POD_NAME}:/tmp/${SQL_FILENAME} \
&& kubectl -n application exec -it deployment/app-postgresql-tool -- bash -c \
'echo "CREATE DATABASE csst;" | PGPASSWORD="$POSTGRES_PASSWORD" \
psql --host app-postgresql.application -U postgres -d postgres -p 5432' \
&& kubectl -n application exec -it deployment/app-postgresql-tool -- bash -c \
'PGPASSWORD="$POSTGRES_PASSWORD" psql --host app-postgresql.application \
-U postgres -d csst -p 5432 < /tmp/init_dfs_table_data.sql'
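To confirm the import, list the tables in the target database; this reuses the tool deployment and service names from the steps above:
kubectl -n application exec -it deployment/app-postgresql-tool -- bash -c \
  'PGPASSWORD="$POSTGRES_PASSWORD" psql --host app-postgresql.application \
  -U postgres -d csst -p 5432 --command "\dt"'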
FAQ
Install Redis
Installation
Preliminary
1. Kubernetes has been installed, if not check 🔗link
2. Helm has been installed, if not check 🔗link
1.get helm repo
helm repo add ay-helm-mirror https://aaronyang0628.github.io/helm-chart-mirror/charts
helm repo update
2.install chart
helm install ay-helm-mirror/kube-prometheus-stack --generate-name
for more information, you can check 🔗https://artifacthub.io/packages/helm/prometheus-community/prometheus
Preliminary
1. Kubernetes has been installed, if not check 🔗link
2. Helm has been installed, if not check 🔗link
3. ArgoCD has been installed, if not check 🔗link
1.prepare `deploy-xxxxx.yaml`
2.apply to k8s
kubectl -n argocd apply -f xxxx.yaml
3.sync by argocd
argocd app sync argocd/xxxx
4.prepare yaml-content.yaml
5.apply to k8s
kubectl apply -f xxxx.yaml
6.apply xxxx.yaml directly
kubectl apply -f - <<EOF
EOF
Preliminary
1. Docker|Podman|Buildah has been installed, if not check 🔗link
you can run an additional daocloud image to accelerate your pulling, check Daocloud Proxy
1.init server
mkdir -p $(pwd)/redis/data
podman run --rm \
--name redis \
-p 6379:6379 \
-d docker.io/library/redis:7.2.4-alpine
2.use internal client
podman run --rm \
-it docker.io/library/redis:7.2.4-alpine \
redis-cli \
-h host.containers.internal \
set mykey somevalue
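Reading the key back confirms the round trip:
podman run --rm \
  -it docker.io/library/redis:7.2.4-alpine \
  redis-cli \
  -h host.containers.internal \
  get mykey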
Preliminary
1. Kubernetes has been installed, if not check 🔗link
2. Helm has been installed, if not check 🔗link
3. ArgoCD has been installed, if not check 🔗link
4. Argo Workflow has been installed, if not check 🔗link
5. Minio artifact repository has been configured (endpoint: minio.storage:9000), if not check 🔗link
1.prepare `argocd-login-credentials`
ARGOCD_USERNAME=admin
ARGOCD_PASSWORD=$(kubectl -n argocd get secret argocd-initial-admin-secret -o jsonpath="{.data.password}" | base64 -d)
kubectl -n business-workflows create secret generic argocd-login-credentials \
--from-literal=username=${ARGOCD_USERNAME} \
--from-literal=password=${ARGOCD_PASSWORD}
2.apply rolebinding to k8s
kubectl apply -f - <<EOF
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: application-administrator
rules:
- apiGroups:
- argoproj.io
resources:
- applications
verbs:
- '*'
- apiGroups:
- apps
resources:
- deployments
verbs:
- '*'
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: application-administration
namespace: argocd
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: application-administrator
subjects:
- kind: ServiceAccount
name: argo-workflow
namespace: business-workflows
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: application-administration
namespace: application
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: application-administrator
subjects:
- kind: ServiceAccount
name: argo-workflow
namespace: business-workflows
EOF
3.prepare redis credentials secret
kubectl -n application create secret generic redis-credentials \
--from-literal=redis-password=$(tr -dc A-Za-z0-9 </dev/urandom | head -c 16)
4.prepare `deploy-redis-flow.yaml`
apiVersion: argoproj.io/v1alpha1
kind: Workflow
metadata:
generateName: deploy-argocd-app-redis-
spec:
entrypoint: entry
artifactRepositoryRef:
configmap: artifact-repositories
key: default-artifact-repository
serviceAccountName: argo-workflow
templates:
- name: entry
inputs:
parameters:
- name: argocd-server
value: argocd-server.argocd:443
- name: insecure-option
value: --insecure
dag:
tasks:
- name: apply
template: apply
- name: prepare-argocd-binary
template: prepare-argocd-binary
dependencies:
- apply
- name: sync
dependencies:
- prepare-argocd-binary
template: sync
arguments:
artifacts:
- name: argocd-binary
from: "{{tasks.prepare-argocd-binary.outputs.artifacts.argocd-binary}}"
parameters:
- name: argocd-server
value: "{{inputs.parameters.argocd-server}}"
- name: insecure-option
value: "{{inputs.parameters.insecure-option}}"
- name: wait
dependencies:
- sync
template: wait
arguments:
artifacts:
- name: argocd-binary
from: "{{tasks.prepare-argocd-binary.outputs.artifacts.argocd-binary}}"
parameters:
- name: argocd-server
value: "{{inputs.parameters.argocd-server}}"
- name: insecure-option
value: "{{inputs.parameters.insecure-option}}"
- name: apply
resource:
action: apply
manifest: |
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: app-redis
namespace: argocd
spec:
syncPolicy:
syncOptions:
- CreateNamespace=true
project: default
source:
repoURL: https://charts.bitnami.com/bitnami
chart: redis
targetRevision: 18.16.0
helm:
releaseName: app-redis
values: |
architecture: replication
auth:
enabled: true
sentinel: true
existingSecret: redis-credentials
master:
count: 1
disableCommands:
- FLUSHDB
- FLUSHALL
persistence:
enabled: false
replica:
replicaCount: 3
disableCommands:
- FLUSHDB
- FLUSHALL
persistence:
enabled: false
image:
registry: m.daocloud.io/docker.io
pullPolicy: IfNotPresent
sentinel:
enabled: false
persistence:
enabled: false
image:
registry: m.daocloud.io/docker.io
pullPolicy: IfNotPresent
metrics:
enabled: false
image:
registry: m.daocloud.io/docker.io
pullPolicy: IfNotPresent
volumePermissions:
enabled: false
image:
registry: m.daocloud.io/docker.io
pullPolicy: IfNotPresent
sysctl:
enabled: false
image:
registry: m.daocloud.io/docker.io
pullPolicy: IfNotPresent
destination:
server: https://kubernetes.default.svc
namespace: application
- name: prepare-argocd-binary
inputs:
artifacts:
- name: argocd-binary
path: /tmp/argocd
mode: 755
http:
url: https://files.m.daocloud.io/github.com/argoproj/argo-cd/releases/download/v2.9.3/argocd-linux-amd64
outputs:
artifacts:
- name: argocd-binary
path: "{{inputs.artifacts.argocd-binary.path}}"
container:
image: m.daocloud.io/docker.io/library/fedora:39
command:
- sh
- -c
args:
- |
ls -l {{inputs.artifacts.argocd-binary.path}}
- name: sync
inputs:
artifacts:
- name: argocd-binary
path: /usr/local/bin/argocd
parameters:
- name: argocd-server
- name: insecure-option
value: ""
container:
image: m.daocloud.io/docker.io/library/fedora:39
env:
- name: ARGOCD_USERNAME
valueFrom:
secretKeyRef:
name: argocd-login-credentials
key: username
- name: ARGOCD_PASSWORD
valueFrom:
secretKeyRef:
name: argocd-login-credentials
key: password
- name: WITH_PRUNE_OPTION
value: --prune
command:
- sh
- -c
args:
- |
set -e
export ARGOCD_SERVER={{inputs.parameters.argocd-server}}
export INSECURE_OPTION={{inputs.parameters.insecure-option}}
export ARGOCD_USERNAME=${ARGOCD_USERNAME:-admin}
argocd login ${INSECURE_OPTION} --username ${ARGOCD_USERNAME} --password ${ARGOCD_PASSWORD} ${ARGOCD_SERVER}
argocd app sync argocd/app-redis ${WITH_PRUNE_OPTION} --timeout 300
- name: wait
inputs:
artifacts:
- name: argocd-binary
path: /usr/local/bin/argocd
parameters:
- name: argocd-server
- name: insecure-option
value: ""
container:
image: m.daocloud.io/docker.io/library/fedora:39
env:
- name: ARGOCD_USERNAME
valueFrom:
secretKeyRef:
name: argocd-login-credentials
key: username
- name: ARGOCD_PASSWORD
valueFrom:
secretKeyRef:
name: argocd-login-credentials
key: password
command:
- sh
- -c
args:
- |
set -e
export ARGOCD_SERVER={{inputs.parameters.argocd-server}}
export INSECURE_OPTION={{inputs.parameters.insecure-option}}
export ARGOCD_USERNAME=${ARGOCD_USERNAME:-admin}
argocd login ${INSECURE_OPTION} --username ${ARGOCD_USERNAME} --password ${ARGOCD_PASSWORD} ${ARGOCD_SERVER}
argocd app wait argocd/app-redis
6.submit to argo workflow client
argo -n business-workflows submit deploy-redis-flow.yaml
7.decode password
kubectl -n application get secret redis-credentials -o jsonpath='{.data.redis-password}' | base64 -d
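A quick connectivity check with a one-shot client pod; a sketch, assuming the chart exposes a service named `app-redis-master` in the `application` namespace:
REDIS_PASSWORD=$(kubectl -n application get secret redis-credentials -o jsonpath='{.data.redis-password}' | base64 -d)
# service name app-redis-master is an assumption based on the release name above
kubectl -n application run redis-client --rm -it --restart=Never \
  --image=m.daocloud.io/docker.io/library/redis:7.2.4-alpine -- \
  redis-cli -h app-redis-master.application -a "${REDIS_PASSWORD}" ping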
FAQ
HPC
Monitor
Subsections of Monitor
Install Prometheus Stack
Installation
Preliminary
1. Kubernetes has been installed, if not check 🔗link
2. Helm has been installed, if not check 🔗link
helm repo add ay-helm-mirror https://aaronyang0628.github.io/helm-chart-mirror/charts
helm repo update
helm install ay-helm-mirror/kube-prometheus-stack --generate-name
for more information, you can check 🔗https://artifacthub.io/packages/helm/prometheus-community/prometheus
Preliminary
1. Kubernetes has been installed, if not check 🔗link
2. argoCD has been installed, if not check 🔗link
3. ingress has been installed on argoCD, if not check 🔗link
4. cert-manager has been installed on argocd and there is a ClusterIssuer named `self-signed-ca-issuer`, if not check 🔗link
1.prepare `prometheus-stack-credentials` secret
kubectl get namespaces monitor > /dev/null 2>&1 || kubectl create namespace monitor
kubectl -n monitor create secret generic prometheus-stack-credentials \
--from-literal=grafana-username=admin \
--from-literal=grafana-password=$(tr -dc A-Za-z0-9 </dev/urandom | head -c 16)
2.prepare `prometheus-stack.yaml`
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: prometheus-stack
spec:
syncPolicy:
syncOptions:
- CreateNamespace=true
- ServerSideApply=true
project: default
source:
repoURL: https://aaronyang0628.github.io/helm-chart-mirror/charts
chart: kube-prometheus-stack
targetRevision: 72.6.2
helm:
releaseName: prometheus-stack
values: |
crds:
enabled: true
global:
rbac:
create: true
imageRegistry: ""
imagePullSecrets: []
alertmanager:
enabled: true
ingress:
enabled: false
serviceMonitor:
selfMonitor: true
interval: ""
alertmanagerSpec:
image:
registry: m.daocloud.io/quay.io
repository: prometheus/alertmanager
tag: v0.28.1
replicas: 1
resources: {}
storage:
volumeClaimTemplate:
spec:
storageClassName: ""
accessModes: ["ReadWriteOnce"]
resources:
requests:
storage: 2Gi
grafana:
enabled: true
ingress:
enabled: true
annotations:
cert-manager.io/cluster-issuer: self-signed-ca-issuer
kubernetes.io/ingress.class: nginx
hosts:
- grafana.dev.tech
path: /
pathType: ImplementationSpecific
tls:
- secretName: grafana.dev.tech-tls
hosts:
- grafana.dev.tech
prometheusOperator:
admissionWebhooks:
patch:
resources: {}
image:
registry: m.daocloud.io/registry.k8s.io
repository: ingress-nginx/kube-webhook-certgen
tag: v1.5.3
image:
registry: m.daocloud.io/quay.io
repository: prometheus-operator/prometheus-operator
prometheusConfigReloader:
image:
registry: m.daocloud.io/quay.io
repository: prometheus-operator/prometheus-config-reloader
resources: {}
thanosImage:
registry: m.daocloud.io/quay.io
repository: thanos/thanos
tag: v0.38.0
prometheus:
enabled: true
ingress:
enabled: true
annotations:
cert-manager.io/cluster-issuer: self-signed-ca-issuer
kubernetes.io/ingress.class: nginx
hosts:
- prometheus.dev.tech
path: /
pathType: ImplementationSpecific
tls:
- secretName: prometheus.dev.tech-tls
hosts:
- prometheus.dev.tech
prometheusSpec:
image:
registry: m.daocloud.io/quay.io
repository: prometheus/prometheus
tag: v3.4.0
replicas: 1
shards: 1
resources: {}
storageSpec:
volumeClaimTemplate:
spec:
storageClassName: ""
accessModes: ["ReadWriteOnce"]
resources:
requests:
storage: 2Gi
thanosRuler:
enabled: false
ingress:
enabled: false
thanosRulerSpec:
replicas: 1
storage: {}
resources: {}
image:
registry: m.daocloud.io/quay.io
repository: thanos/thanos
tag: v0.38.0
destination:
server: https://kubernetes.default.svc
namespace: monitor
3.apply to k8s
kubectl -n argocd apply -f prometheus-stack.yaml
4.sync by argocd
argocd app sync argocd/prometheus-stack
5.decode grafana password
kubectl -n monitor get secret prometheus-stack-credentials -o jsonpath='{.data.grafana-password}' | base64 -d
> add `$K8S_MASTER_IP grafana.dev.tech` to **/etc/hosts**
> add `$K8S_MASTER_IP prometheus.dev.tech` to **/etc/hosts**
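Once the hosts entries are in place, you can probe Grafana through the ingress from the command line; a sketch, assuming the ingress-nginx HTTPS NodePort 32443 configured in the Ingress section:
# -k skips TLS verification because the certificate is self-signed
curl -k https://grafana.dev.tech:32443/api/health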
install based on docker
echo "start from head is important"
FAQ
Networking
Subsections of Networking
Install Ingress
Installation
Preliminary
1. Kubernetes has been installed, if not check 🔗link
2. Helm has been installed, if not check 🔗link
helm repo add ay-helm-mirror https://aaronyang0628.github.io/helm-chart-mirror/charts
helm repo update
helm install ay-helm-mirror/kube-prometheus-stack --generate-name
for more information, you can check 🔗https://artifacthub.io/packages/helm/prometheus-community/prometheus
Preliminary
1. Kubernetes has been installed, if not check 🔗link
2. argoCD has been installed, if not check 🔗link
1.prepare `ingress-nginx.yaml`
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: ingress-nginx
spec:
syncPolicy:
syncOptions:
- CreateNamespace=true
project: default
source:
repoURL: https://aaronyang0628.github.io/helm-chart-mirror/charts
chart: ingress-nginx
targetRevision: 4.11.3
helm:
releaseName: ingress-nginx
values: |
controller:
image:
registry: m.daocloud.io
image: registry.k8s.io/ingress-nginx/controller
tag: "v1.9.5"
pullPolicy: IfNotPresent
service:
enabled: true
type: NodePort
nodePorts:
http: 32080
https: 32443
tcp:
8080: 32808
admissionWebhooks:
enabled: true
patch:
enabled: true
image:
registry: m.daocloud.io
image: registry.k8s.io/ingress-nginx/kube-webhook-certgen
tag: v20231011-8b53cabe0
pullPolicy: IfNotPresent
defaultBackend:
enabled: false
destination:
server: https://kubernetes.default.svc
namespace: basic-components
2.apply to k8s
kubectl -n argocd apply -f ingress-nginx.yaml
3.sync by argocd
argocd app sync argocd/ingress-nginx
install based on docker
echo "start from head is important"
FAQ
Install Istio
Installation
Preliminary
1. Kubernetes has been installed, if not check 🔗link
2. Helm has been installed, if not check 🔗link
1.get helm repo
helm repo add ay-helm-mirror https://aaronyang0628.github.io/helm-chart-mirror/charts
helm repo update
2.install chart
helm install ay-helm-mirror/kube-prometheus-stack --generate-name
for more information, you can check 🔗https://artifacthub.io/packages/helm/prometheus-community/prometheus
Preliminary
1. Kubernetes has been installed, if not check 🔗link
2. Helm has been installed, if not check 🔗link
3. ArgoCD has been installed, if not check 🔗link
1.prepare `deploy-istio-base.yaml`
kubectl -n argocd apply -f - << EOF
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: istio-base
spec:
syncPolicy:
syncOptions:
- CreateNamespace=true
project: default
source:
repoURL: https://istio-release.storage.googleapis.com/charts
chart: base
targetRevision: 1.23.2
helm:
releaseName: istio-base
values: |
defaults:
global:
istioNamespace: istio-system
base:
enableCRDTemplates: false
enableIstioConfigCRDs: true
defaultRevision: "default"
destination:
server: https://kubernetes.default.svc
namespace: istio-system
EOF
2.sync by argocd
argocd app sync argocd/istio-base
3.prepare `deploy-istiod.yaml`
kubectl -n argocd apply -f - << EOF
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: istiod
spec:
syncPolicy:
syncOptions:
- CreateNamespace=true
project: default
source:
repoURL: https://istio-release.storage.googleapis.com/charts
chart: istiod
targetRevision: 1.23.2
helm:
releaseName: istiod
values: |
defaults:
global:
istioNamespace: istio-system
defaultResources:
requests:
cpu: 10m
memory: 128Mi
limits:
cpu: 100m
memory: 128Mi
hub: m.daocloud.io/docker.io/istio
proxy:
autoInject: disabled
resources:
requests:
cpu: 100m
memory: 128Mi
limits:
cpu: 2000m
memory: 1024Mi
pilot:
autoscaleEnabled: true
resources:
requests:
cpu: 500m
memory: 2048Mi
cpu:
targetAverageUtilization: 80
podAnnotations:
cluster-autoscaler.kubernetes.io/safe-to-evict: "true"
destination:
server: https://kubernetes.default.svc
namespace: istio-system
EOF
4.sync by argocd
argocd app sync argocd/istiod
5.prepare `deploy-istio-ingressgateway.yaml`
kubectl -n argocd apply -f - << EOF
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: istio-ingressgateway
spec:
syncPolicy:
syncOptions:
- CreateNamespace=true
project: default
source:
repoURL: https://istio-release.storage.googleapis.com/charts
chart: gateway
targetRevision: 1.23.2
helm:
releaseName: istio-ingressgateway
values: |
defaults:
replicaCount: 1
podAnnotations:
inject.istio.io/templates: "gateway"
sidecar.istio.io/inject: "true"
cluster-autoscaler.kubernetes.io/safe-to-evict: "true"
resources:
requests:
cpu: 100m
memory: 128Mi
limits:
cpu: 2000m
memory: 1024Mi
service:
type: LoadBalancer
ports:
- name: status-port
port: 15021
protocol: TCP
targetPort: 15021
- name: http2
port: 80
protocol: TCP
targetPort: 80
- name: https
port: 443
protocol: TCP
targetPort: 443
autoscaling:
enabled: true
minReplicas: 1
maxReplicas: 5
destination:
server: https://kubernetes.default.svc
namespace: istio-system
EOF
6.sync by argocd
argocd app sync argocd/istio-ingressgateway
Preliminary
1. Kubernetes has been installed, if not check 🔗link
2. Helm has been installed, if not check 🔗link
3. ArgoCD has been installed, if not check 🔗link
4. Argo Workflow has been installed, if not check 🔗link
1.prepare `argocd-login-credentials`
kubectl get namespaces database > /dev/null 2>&1 || kubectl create namespace database
2.apply rolebinding to k8s
kubectl apply -f - <<EOF
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: application-administrator
rules:
- apiGroups:
- argoproj.io
resources:
- applications
verbs:
- '*'
- apiGroups:
- apps
resources:
- deployments
verbs:
- '*'
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: application-administration
namespace: argocd
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: application-administrator
subjects:
- kind: ServiceAccount
name: argo-workflow
namespace: business-workflows
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: application-administration
namespace: application
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: application-administrator
subjects:
- kind: ServiceAccount
name: argo-workflow
namespace: business-workflows
EOF
4.prepare `deploy-xxxx-flow.yaml`
6.submit to argo workflow client
argo -n business-workflows submit deploy-xxxx-flow.yaml
7.decode password
kubectl -n application get secret xxxx-credentials -o jsonpath='{.data.xxx-password}' | base64 -d
FAQ
Install Nginx
1. prepare server.conf
cat << EOF > default.conf
server {
listen 80;
location / {
root /usr/share/nginx/html;
autoindex on;
}
}
EOF
2. install
mkdir $(pwd)/data
podman run --rm -p 8080:80 \
-v $(pwd)/data:/usr/share/nginx/html:ro \
-v $(pwd)/default.conf:/etc/nginx/conf.d/default.conf:ro \
-d docker.io/library/nginx:1.19.9-alpine
echo 'this is a test' > $(pwd)/data/some-data.txt
you can run an additional daocloud image to accelerate your pulling, check Daocloud Proxy
visit http://localhost:8080
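To verify from the command line instead of the browser, fetch the test file created above:
curl http://localhost:8080/some-data.txt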
RPC
Subsections of RPC
gRpc
This guide gets you started with gRPC in C++ with a simple working example.
In the C++ world, there’s no universally accepted standard for managing project dependencies. You need to build and install gRPC before building and running this quick start’s Hello World example.
Build and locally install gRPC and Protocol Buffers. The steps in this section explain how to build and locally install gRPC and Protocol Buffers using cmake. If you’d rather use bazel, see Building from source.
1. Setup
Choose a directory to hold locally installed packages. This page assumes that the environment variable MY_INSTALL_DIR
holds this directory path. For example:
export MY_INSTALL_DIR=$HOME/.local
Ensure that the directory exists:
mkdir -p $MY_INSTALL_DIR
Add the local bin folder to your path variable, for example:
export PATH="$MY_INSTALL_DIR/bin:$PATH"
We strongly encourage you to install gRPC locally — using an appropriately set CMAKE_INSTALL_PREFIX
— because there is no easy way to uninstall gRPC after you’ve installed it globally.
2. Install Essentials
2.1 Install Cmake
You need version 3.13 or later of cmake. Install it by following these instructions:
sudo apt install -y cmake
brew install cmake
2.2 Install basic tools required to build gRPC
sudo apt install -y build-essential autoconf libtool pkg-config
brew install autoconf automake libtool pkg-config
2.3 Clone the grpc repo
Clone the grpc repo and its submodules:
git clone --recurse-submodules -b v1.62.0 --depth 1 --shallow-submodules https://github.com/grpc/grpc
2.4 Build and install gRPC and Protocol Buffers
While not mandatory, gRPC applications usually leverage Protocol Buffers for service definitions and data serialization, and the example code uses proto3
.
The following commands build and locally install gRPC and Protocol Buffers:
cd grpc
mkdir -p cmake/build
pushd cmake/build
cmake -DgRPC_INSTALL=ON \
-DgRPC_BUILD_TESTS=OFF \
-DCMAKE_INSTALL_PREFIX=$MY_INSTALL_DIR \
../..
make -j 4
make install
popd
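A quick sanity check that the install landed where expected, assuming $MY_INSTALL_DIR/bin is on your PATH as set in step 1:
# both binaries are produced by the install step above
protoc --version
ls $MY_INSTALL_DIR/bin | grep grpc_cpp_plugin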
3. Run the example
The example code is part of the grpc
repo source, which you cloned as part of the steps of the previous section.
3.1 change to the example’s directory:
cd examples/cpp/helloworld
3.2 build the example project by using cmake
make sure you can still run
echo $MY_INSTALL_DIR
and it returns a valid path
mkdir -p cmake/build
pushd cmake/build
cmake -DCMAKE_PREFIX_PATH=$MY_INSTALL_DIR ../..
make -j 4
3.3 run the server
./greeter_server
3.4 from a different terminal, run the client and see the client output:
./greeter_client
and the result should be like this:
Greeter received: Hello world
Storage
Subsections of Storage
Deploy Artifact Repository
Preliminary
- Kubernetes has been installed, if not check link
- minio is ready for artifact repository, endpoint: minio.storage:9000
Steps
1. prepare bucket for s3 artifact repository
# K8S_MASTER_IP could be you master ip or loadbalancer external ip
K8S_MASTER_IP=172.27.253.27
MINIO_ACCESS_SECRET=$(kubectl -n storage get secret minio-secret -o jsonpath='{.data.rootPassword}' | base64 -d)
podman run --rm \
--entrypoint bash \
--add-host=minio-api.dev.geekcity.tech:${K8S_MASTER_IP} \
-it docker.io/minio/mc:latest \
-c "mc alias set minio http://minio-api.dev.geekcity.tech admin ${MINIO_ACCESS_SECRET} \
&& mc ls minio \
&& mc mb --ignore-existing minio/argo-workflows-artifacts"
2. prepare secret s3-artifact-repository-credentials
the secret will be created in the `business-workflows` namespace
MINIO_ACCESS_KEY=$(kubectl -n storage get secret minio-secret -o jsonpath='{.data.rootUser}' | base64 -d)
kubectl -n business-workflows create secret generic s3-artifact-repository-credentials \
--from-literal=accessKey=${MINIO_ACCESS_KEY} \
--from-literal=secretKey=${MINIO_ACCESS_SECRET}
3. prepare configMap artifact-repositories.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: artifact-repositories
annotations:
workflows.argoproj.io/default-artifact-repository: default-artifact-repository
data:
default-artifact-repository: |
s3:
endpoint: minio.storage:9000
insecure: true
accessKeySecret:
name: s3-artifact-repository-credentials
key: accessKey
secretKeySecret:
name: s3-artifact-repository-credentials
key: secretKey
bucket: argo-workflows-artifacts
4. apply artifact-repositories.yaml
to k8s
kubectl -n business-workflows apply -f artifact-repositories.yaml
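To confirm the ConfigMap landed as expected, print the repository definition back out:
kubectl -n business-workflows get configmap artifact-repositories \
  -o jsonpath='{.data.default-artifact-repository}'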
Install Minio
Installation
Preliminary
1. Kubernetes has been installed, if not check 🔗link
2. ArgoCD has been installed, if not check 🔗link
3. Ingress has been installed on argoCD, if not check 🔗link
4. Cert-manager has been installed on argocd and there is a ClusterIssuer named `self-signed-ca-issuer`, if not check 🔗link
1.prepare minio credentials secret
kubectl get namespaces storage > /dev/null 2>&1 || kubectl create namespace storage
kubectl -n storage create secret generic minio-secret \
--from-literal=root-user=admin \
--from-literal=root-password=$(tr -dc A-Za-z0-9 </dev/urandom | head -c 16)
2.prepare `deploy-minio.yaml`
kubectl -n argocd apply -f - << EOF
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: minio
spec:
syncPolicy:
syncOptions:
- CreateNamespace=true
project: default
source:
repoURL: https://aaronyang0628.github.io/helm-chart-mirror/charts
chart: minio
targetRevision: 16.0.10
helm:
releaseName: minio
values: |
global:
imageRegistry: "m.daocloud.io/docker.io"
imagePullSecrets: []
storageClass: ""
security:
allowInsecureImages: true
compatibility:
openshift:
adaptSecurityContext: auto
image:
registry: m.daocloud.io/docker.io
repository: bitnami/minio
clientImage:
registry: m.daocloud.io/docker.io
repository: bitnami/minio-client
mode: standalone
defaultBuckets: ""
auth:
# rootUser: admin
# rootPassword: ""
existingSecret: "minio-secret"
statefulset:
updateStrategy:
type: RollingUpdate
podManagementPolicy: Parallel
replicaCount: 1
zones: 1
drivesPerNode: 1
resourcesPreset: "micro"
resources:
requests:
memory: 512Mi
cpu: 250m
limits:
memory: 512Mi
cpu: 250m
ingress:
enabled: true
ingressClassName: "nginx"
hostname: minio-console.ay.dev
path: /?(.*)
pathType: ImplementationSpecific
annotations:
nginx.ingress.kubernetes.io/rewrite-target: /$1
tls: true
selfSigned: true
extraHosts: []
apiIngress:
enabled: true
ingressClassName: "nginx"
hostname: minio-api.ay.dev
path: /?(.*)
pathType: ImplementationSpecific
annotations:
nginx.ingress.kubernetes.io/rewrite-target: /$1
persistence:
enabled: false
storageClass: ""
mountPath: /bitnami/minio/data
accessModes:
- ReadWriteOnce
size: 8Gi
annotations: {}
existingClaim: ""
metrics:
prometheusAuthType: public
enabled: false
serviceMonitor:
enabled: false
namespace: ""
labels: {}
jobLabel: ""
paths:
- /minio/v2/metrics/cluster
- /minio/v2/metrics/node
interval: 30s
scrapeTimeout: ""
honorLabels: false
prometheusRule:
enabled: false
namespace: ""
additionalLabels: {}
rules: []
destination:
server: https://kubernetes.default.svc
namespace: storage
EOF
3.sync by argocd
argocd app sync argocd/minio
4.decode minio secret
kubectl -n storage get secret minio-secret -o jsonpath='{.data.root-password}' | base64 -d
5.visit web console
add `$K8S_MASTER_IP minio-console.dev.tech` to /etc/hosts
address: 🔗http://minio-console.dev.tech:32080/login
access key: `admin`
secret key: the root-password decoded in step 4
6.using mc
K8S_MASTER_IP=$(kubectl get node -l node-role.kubernetes.io/control-plane -o jsonpath='{.items[0].status.addresses[?(@.type=="InternalIP")].address}')
MINIO_ACCESS_SECRET=$(kubectl -n storage get secret minio-secret -o jsonpath='{.data.root-password}' | base64 -d)
podman run --rm \
--entrypoint bash \
--add-host=minio-api.dev.tech:${K8S_MASTER_IP} \
-it m.daocloud.io/docker.io/minio/mc:latest \
-c "mc alias set minio http://minio-api.dev.tech:32080 admin ${MINIO_ACCESS_SECRET} \
&& mc ls minio \
&& mc mb --ignore-existing minio/test \
&& mc cp /etc/hosts minio/test/etc/hosts \
&& mc ls --recursive minio"
K8S_MASTER_IP=$(kubectl get node -l node-role.kubernetes.io/control-plane -o jsonpath='{.items[0].status.addresses[?(@.type=="InternalIP")].address}')
MINIO_ACCESS_SECRET=$(kubectl -n storage get secret minio-secret -o jsonpath='{.data.root-password}' | base64 -d)
podman run --rm \
--entrypoint bash \
--add-host=minio-api.dev.tech:${K8S_MASTER_IP} \
-it m.daocloud.io/docker.io/minio/mc:latest
Preliminary
1. Docker has been installed, if not check 🔗link
you can run an additional daocloud image to accelerate your pulling, check Daocloud Proxy
1.init server
mkdir -p $(pwd)/minio/data
podman run --rm \
--name minio-server \
-p 9000:9000 \
-p 9001:9001 \
-v $(pwd)/minio/data:/data \
-d docker.io/minio/minio:latest server /data --console-address :9001
2.use web console
And then you can visit 🔗http://localhost:9001
username: `minioadmin`
password: `minioadmin`
3.use internal client
podman run --rm \
--entrypoint bash \
-it docker.io/minio/mc:latest \
-c "mc alias set minio http://host.docker.internal:9000 minioadmin minioadmin \
&& mc ls minio \
&& mc mb --ignore-existing minio/test \
&& mc cp /etc/hosts minio/test/etc/hosts \
&& mc ls --recursive minio"
FAQ
Install NFS
Installation
Preliminary
1. Kubernetes has been installed, if not check 🔗link
2. argoCD has been installed, if not check 🔗link
3. ingress has been installed on argoCD, if not check 🔗link
1.prepare `nfs-provisioner.yaml`
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: nfs-provisioner
spec:
syncPolicy:
syncOptions:
- CreateNamespace=true
project: default
source:
repoURL: https://kubernetes-sigs.github.io/nfs-subdir-external-provisioner
chart: nfs-subdir-external-provisioner
targetRevision: 4.0.18
helm:
releaseName: nfs-provisioner
values: |
image:
repository: m.daocloud.io/registry.k8s.io/sig-storage/nfs-subdir-external-provisioner
pullPolicy: IfNotPresent
nfs:
server: nfs.services.test
path: /
mountOptions:
- vers=4
- minorversion=0
- rsize=1048576
- wsize=1048576
- hard
- timeo=600
- retrans=2
- noresvport
volumeName: nfs-subdir-external-provisioner-nas
reclaimPolicy: Retain
storageClass:
create: true
defaultClass: true
name: nfs-external-nas
destination:
server: https://kubernetes.default.svc
namespace: storage
2.deploy nfs provisioner
kubectl -n argocd apply -f nfs-provisioner.yaml
3.sync by argocd
argocd app sync argocd/nfs-provisioner
Preliminary
1. Docker has been installed, if not check 🔗link
you can run an additional daocloud image to accelerate your pulling, check Daocloud Proxy
1.init server
echo -e "nfs\nnfsd" > /etc/modules-load.d/nfs4.conf
modprobe nfs && modprobe nfsd
mkdir -p $(pwd)/data/nfs/data
echo '/data *(rw,fsid=0,no_subtree_check,insecure,no_root_squash)' > $(pwd)/data/nfs/exports
podman run \
--name nfs4 \
--rm \
--privileged \
-p 2049:2049 \
-v $(pwd)/data/nfs/data:/data \
-v $(pwd)/data/nfs/exports:/etc/exports:ro \
-d docker.io/erichough/nfs-server:2.2.1
Preliminary
1. centos yum repo source has been updated, if not check 🔗link
1.install nfs util
# Debian/Ubuntu
sudo apt update -y
sudo apt-get install -y nfs-common
# CentOS/RHEL
dnf update -y
dnf install -y nfs-utils rpcbind
2. create share folder
mkdir /data && chmod 755 /data
3.edit `/etc/exports`
/data *(rw,sync,insecure,no_root_squash,no_subtree_check)
4.start nfs server
systemctl enable rpcbind
systemctl enable nfs-server
systemctl start rpcbind
systemctl start nfs-server
5.test load on localhost
showmount -e localhost
6.test load on other ip
showmount -e 192.168.aa.bb
7.mount nfs disk
mkdir -p $(pwd)/mnt/nfs
sudo mount -v 192.168.aa.bb:/data $(pwd)/mnt/nfs -o proto=tcp -o nolock
8.set nfs auto mount
echo "192.168.aa.bb:/data /data nfs rw,auto,nofail,noatime,nolock,intr,tcp,actimeo=1800 0 0" >> /etc/fstab
df -h
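After a reboot (or running `mount -a`), a quick check that the share is mounted and writable:
# the mount table should list the NFS share, and a write round trip should succeed
mount | grep nfs
touch /data/.nfs_write_test && rm /data/.nfs_write_test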
Notes
[Optional] create new partition
fdisk /dev/vdb
# n
# p
# w
parted
#select /dev/vdb
#mklabel gpt
#mkpart primary 0 -1
#Cancel
#mkpart primary 0% 100%
#print
[Optional]Format disk
mkfs.xfs /dev/vdb1 -f
[Optional] mount disk to folder
mount /dev/vdb1 /data
[Optional] mount when restart
#vim `/etc/fstab`
/dev/vdb1 /data xfs defaults 0 0
FAQ
Install Redis
Preliminary
- Kubernetes has been installed, if not check link
- argoCD has been installed, if not check link
- ingress has been installed on argoCD, if not check link
- cert-manager has been installed on argocd and there is a ClusterIssuer named `self-signed-ca-issuer`, if not check link
Steps
1. prepare secret
kubectl get namespaces storage > /dev/null 2>&1 || kubectl create namespace storage
kubectl -n storage create secret generic redis-credentials \
--from-literal=redis-password=$(tr -dc A-Za-z0-9 </dev/urandom | head -c 16)
2. prepare redis.yaml
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: redis
spec:
syncPolicy:
syncOptions:
- CreateNamespace=true
project: default
source:
repoURL: https://charts.bitnami.com/bitnami
chart: redis
targetRevision: 18.16.0
helm:
releaseName: redis
values: |
architecture: replication
auth:
enabled: true
sentinel: true
existingSecret: redis-credentials
master:
count: 1
disableCommands:
- FLUSHDB
- FLUSHALL
persistence:
enabled: true
storageClass: nfs-external
size: 8Gi
replica:
replicaCount: 3
disableCommands:
- FLUSHDB
- FLUSHALL
persistence:
enabled: true
storageClass: nfs-external
size: 8Gi
image:
registry: m.daocloud.io/docker.io
pullPolicy: IfNotPresent
sentinel:
enabled: false
persistence:
enabled: false
image:
registry: m.daocloud.io/docker.io
pullPolicy: IfNotPresent
metrics:
enabled: false
image:
registry: m.daocloud.io/docker.io
pullPolicy: IfNotPresent
volumePermissions:
enabled: false
image:
registry: m.daocloud.io/docker.io
pullPolicy: IfNotPresent
sysctl:
enabled: false
image:
registry: m.daocloud.io/docker.io
pullPolicy: IfNotPresent
extraDeploy:
- |
apiVersion: apps/v1
kind: Deployment
metadata:
name: redis-tool
namespace: csst
labels:
app.kubernetes.io/name: redis-tool
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: redis-tool
template:
metadata:
labels:
app.kubernetes.io/name: redis-tool
spec:
containers:
- name: redis-tool
image: m.daocloud.io/docker.io/bitnami/redis:7.2.4-debian-12-r8
imagePullPolicy: IfNotPresent
env:
- name: REDISCLI_AUTH
valueFrom:
secretKeyRef:
key: redis-password
name: redis-credentials
- name: TZ
value: Asia/Shanghai
command:
- tail
- -f
- /etc/hosts
destination:
server: https://kubernetes.default.svc
namespace: storage
3. apply to k8s
kubectl -n argocd apply -f redis.yaml
4. sync by argocd
argocd app sync argocd/redis
5. decode password
kubectl -n storage get secret redis-credentials -o jsonpath='{.data.redis-password}' | base64 -d
Tests
kubectl -n storage exec -it deployment/redis-tool -- \
    redis-cli -c -h redis-master.storage ping
kubectl -n storage exec -it deployment/redis-tool -- \
    redis-cli -c -h redis-master.storage set mykey somevalue
kubectl -n storage exec -it deployment/redis-tool -- \
    redis-cli -c -h redis-master.storage get mykey
kubectl -n storage exec -it deployment/redis-tool -- \
    redis-cli -c -h redis-master.storage del mykey
kubectl -n storage exec -it deployment/redis-tool -- \
    redis-cli -c -h redis-master.storage get mykey