Subsections of 🪀Install Shit
Application
Subsections of Application
Datahub
Preliminary
- Kubernetes has been installed; if not, check this link
- argoCD has been installed; if not, check this link
- Elasticsearch has been installed; if not, check this link
- MariaDB has been installed; if not, check this link
- Kafka has been installed; if not, check this link
Steps
1. prepare datahub credentials secret
kubectl -n application \
create secret generic datahub-credentials \
--from-literal=mysql-root-password="$(kubectl get secret mariadb-credentials --namespace database -o jsonpath='{.data.mariadb-root-password}' | base64 -d)"
kubectl -n application \
create secret generic datahub-credentials \
--from-literal=mysql-root-password="$(kubectl get secret mariadb-credentials --namespace database -o jsonpath='{.data.mariadb-root-password}' | base64 -d)" \
--from-literal=security.protocol="SASL_PLAINTEXT" \
--from-literal=sasl.mechanism="SCRAM-SHA-256" \
--from-literal=sasl.jaas.config="org.apache.kafka.common.security.scram.ScramLoginModule required username=\"user1\" password=\"$(kubectl get secret kafka-user-passwords --namespace database -o jsonpath='{.data.client-passwords}' | base64 -d | cut -d , -f 1)\";"
2. prepare deploy-datahub.yaml
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: datahub
spec:
syncPolicy:
syncOptions:
- CreateNamespace=true
project: default
source:
repoURL: https://helm.datahubproject.io
chart: datahub
targetRevision: 0.4.8
helm:
releaseName: datahub
values: |
global:
elasticsearch:
host: elastic-search-elasticsearch.application.svc.cluster.local
port: 9200
skipcheck: "false"
insecure: "false"
useSSL: "false"
kafka:
bootstrap:
server: kafka.database.svc.cluster.local:9092
zookeeper:
server: kafka-zookeeper.database.svc.cluster.local:2181
sql:
datasource:
host: mariadb.database.svc.cluster.local:3306
hostForMysqlClient: mariadb.database.svc.cluster.local
port: 3306
url: jdbc:mysql://mariadb.database.svc.cluster.local:3306/datahub?verifyServerCertificate=false&useSSL=true&useUnicode=yes&characterEncoding=UTF-8&enabledTLSProtocols=TLSv1.2
driver: com.mysql.cj.jdbc.Driver
username: root
password:
secretRef: datahub-credentials
secretKey: mysql-root-password
datahub-gms:
enabled: true
replicaCount: 1
image:
repository: m.daocloud.io/docker.io/acryldata/datahub-gms
service:
type: ClusterIP
ingress:
enabled: false
datahub-frontend:
enabled: true
replicaCount: 1
image:
repository: m.daocloud.io/docker.io/acryldata/datahub-frontend-react
defaultUserCredentials:
randomAdminPassword: true
service:
type: ClusterIP
ingress:
enabled: true
className: nginx
annotations:
cert-manager.io/cluster-issuer: self-signed-ca-issuer
hosts:
- host: datahub.dev.geekcity.tech
paths:
- /
tls:
- secretName: "datahub.dev.geekcity.tech-tls"
hosts:
- datahub.dev.geekcity.tech
acryl-datahub-actions:
enabled: true
replicaCount: 1
image:
repository: m.daocloud.io/docker.io/acryldata/datahub-actions
datahub-mae-consumer:
replicaCount: 1
image:
repository: m.daocloud.io/docker.io/acryldata/datahub-mae-consumer
ingress:
enabled: false
datahub-mce-consumer:
replicaCount: 1
image:
repository: m.daocloud.io/docker.io/acryldata/datahub-mce-consumer
ingress:
enabled: false
datahub-ingestion-cron:
enabled: false
image:
repository: m.daocloud.io/docker.io/acryldata/datahub-ingestion
elasticsearchSetupJob:
enabled: true
image:
repository: m.daocloud.io/docker.io/acryldata/datahub-elasticsearch-setup
kafkaSetupJob:
enabled: true
image:
repository: m.daocloud.io/docker.io/acryldata/datahub-kafka-setup
mysqlSetupJob:
enabled: true
image:
repository: m.daocloud.io/docker.io/acryldata/datahub-mysql-setup
postgresqlSetupJob:
enabled: false
image:
repository: m.daocloud.io/docker.io/acryldata/datahub-postgres-setup
datahubUpgrade:
enabled: true
image:
repository: m.daocloud.io/docker.io/acryldata/datahub-upgrade
datahubSystemUpdate:
image:
repository: m.daocloud.io/docker.io/acryldata/datahub-upgrade
destination:
server: https://kubernetes.default.svc
namespace: application
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: datahub
spec:
syncPolicy:
syncOptions:
- CreateNamespace=true
project: default
source:
repoURL: https://helm.datahubproject.io
chart: datahub
targetRevision: 0.4.8
helm:
releaseName: datahub
values: |
global:
springKafkaConfigurationOverrides:
security.protocol: SASL_PLAINTEXT
sasl.mechanism: SCRAM-SHA-256
credentialsAndCertsSecrets:
name: datahub-credentials
secureEnv:
sasl.jaas.config: sasl.jaas.config
elasticsearch:
host: elastic-search-elasticsearch.application.svc.cluster.local
port: 9200
skipcheck: "false"
insecure: "false"
useSSL: "false"
kafka:
bootstrap:
server: kafka.database.svc.cluster.local:9092
zookeeper:
server: kafka-zookeeper.database.svc.cluster.local:2181
neo4j:
host: neo4j.database.svc.cluster.local:7474
uri: bolt://neo4j.database.svc.cluster.local
username: neo4j
password:
secretRef: datahub-credentials
secretKey: neo4j-password
sql:
datasource:
host: mariadb.database.svc.cluster.local:3306
hostForMysqlClient: mariadb.database.svc.cluster.local
port: 3306
url: jdbc:mysql://mariadb.database.svc.cluster.local:3306/datahub?verifyServerCertificate=false&useSSL=true&useUnicode=yes&characterEncoding=UTF-8&enabledTLSProtocols=TLSv1.2
driver: com.mysql.cj.jdbc.Driver
username: root
password:
secretRef: datahub-credentials
secretKey: mysql-root-password
datahub-gms:
enabled: true
replicaCount: 1
image:
repository: m.daocloud.io/docker.io/acryldata/datahub-gms
service:
type: ClusterIP
ingress:
enabled: false
datahub-frontend:
enabled: true
replicaCount: 1
image:
repository: m.daocloud.io/docker.io/acryldata/datahub-frontend-react
defaultUserCredentials:
randomAdminPassword: true
service:
type: ClusterIP
ingress:
enabled: true
className: nginx
annotations:
cert-manager.io/cluster-issuer: self-signed-ca-issuer
hosts:
- host: datahub.dev.geekcity.tech
paths:
- /
tls:
- secretName: "datahub.dev.geekcity.tech-tls"
hosts:
- datahub.dev.geekcity.tech
acryl-datahub-actions:
enabled: true
replicaCount: 1
image:
repository: m.daocloud.io/docker.io/acryldata/datahub-actions
datahub-mae-consumer:
replicaCount: 1
image:
repository: m.daocloud.io/docker.io/acryldata/datahub-mae-consumer
ingress:
enabled: false
datahub-mce-consumer:
replicaCount: 1
image:
repository: m.daocloud.io/docker.io/acryldata/datahub-mce-consumer
ingress:
enabled: false
datahub-ingestion-cron:
enabled: false
image:
repository: m.daocloud.io/docker.io/acryldata/datahub-ingestion
elasticsearchSetupJob:
enabled: true
image:
repository: m.daocloud.io/docker.io/acryldata/datahub-elasticsearch-setup
kafkaSetupJob:
enabled: true
image:
repository: m.daocloud.io/docker.io/acryldata/datahub-kafka-setup
mysqlSetupJob:
enabled: true
image:
repository: m.daocloud.io/docker.io/acryldata/datahub-mysql-setup
postgresqlSetupJob:
enabled: false
image:
repository: m.daocloud.io/docker.io/acryldata/datahub-postgres-setup
datahubUpgrade:
enabled: true
image:
repository: m.daocloud.io/docker.io/acryldata/datahub-upgrade
datahubSystemUpdate:
image:
repository: m.daocloud.io/docker.io/acryldata/datahub-upgrade
destination:
server: https://kubernetes.default.svc
namespace: application
3. apply to k8s
kubectl -n argocd apply -f deploy-datahub.yaml
4. sync by argocd
argocd app sync argocd/datahub
5. extract credentials
kubectl -n application get secret datahub-user-secret -o jsonpath='{.data.user\.props}' | base64 -d
[Optional] Visit through browser
add
$K8S_MASTER_IP datahub.dev.geekcity.tech
to /etc/hosts
- datahub frontend: https://datahub.dev.geekcity.tech:32443
- api: https://datahub.dev.geekcity.tech:32443/openapi/swagger-ui/index.html
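As a quick reachability check from the command line (a minimal sketch; it assumes the /etc/hosts entry above and uses -k because the certificate is self-signed):
# both endpoints should answer through the ingress on NodePort 32443
curl -k -I https://datahub.dev.geekcity.tech:32443/
curl -k -I https://datahub.dev.geekcity.tech:32443/openapi/swagger-ui/index.html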
[Optional] Visit through Datahub CLI
We recommend Python virtual environments (venv-s) to namespace pip modules. Here’s an example setup:
python3 -m venv venv # create the environment
source venv/bin/activate # activate the environment
NOTE: If you install datahub in a virtual environment, that same virtual environment must be re-activated each time a shell window or session is created.
Once inside the virtual environment, install datahub using the following commands
# Requires Python 3.8+
python3 -m pip install --upgrade pip wheel setuptools
python3 -m pip install --upgrade acryl-datahub
# validate that the install was successful
datahub version
# If you see "command not found", try running this instead: python3 -m datahub version
datahub init
# authenticate your datahub CLI with your datahub instance
Auth
Subsections of Auth
Deploy GateKeeper Server
Official Website: https://open-policy-agent.github.io/gatekeeper/website/
Preliminary
- Kubernetes version must be greater than
v1.16
Components
Gatekeeper is a Kubernetes admission controller built on Open Policy Agent (OPA). It lets users define and enforce custom policies that control the creation, update, and deletion of resources in a Kubernetes cluster.
- Core components
- Constraint Templates: define the rule logic of a policy, written in Rego. A template is an abstract policy that can be reused by multiple constraint instances; a minimal template/instance pair is sketched right after this list.
- Constraint Instances: concrete policy instances created from a constraint template; they supply the parameters and match rules that decide which resources the policy applies to.
- Admission Controller (no modification required): intercepts requests to the Kubernetes API server and evaluates them against the defined constraints; any request that violates a constraint is rejected.
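A minimal sketch of such a template/instance pair, adapted from the upstream Gatekeeper documentation (the required label owner and the constraint name are illustrative only):
kubectl apply -f - <<EOF
apiVersion: templates.gatekeeper.sh/v1
kind: ConstraintTemplate
metadata:
  name: k8srequiredlabels
spec:
  crd:
    spec:
      names:
        kind: K8sRequiredLabels
      validation:
        openAPIV3Schema:
          type: object
          properties:
            labels:
              type: array
              items:
                type: string
  targets:
    - target: admission.k8s.gatekeeper.sh
      rego: |
        package k8srequiredlabels

        violation[{"msg": msg, "details": {"missing_labels": missing}}] {
          provided := {label | input.review.object.metadata.labels[label]}
          required := {label | label := input.parameters.labels[_]}
          missing := required - provided
          count(missing) > 0
          msg := sprintf("you must provide labels: %v", [missing])
        }
---
apiVersion: constraints.gatekeeper.sh/v1beta1
kind: K8sRequiredLabels
metadata:
  name: ns-must-have-owner
spec:
  enforcementAction: deny
  match:
    kinds:
      - apiGroups: [""]
        kinds: ["Namespace"]
  parameters:
    labels: ["owner"]
EOF
With this applied, creating a Namespace without an owner label is rejected; switching enforcementAction to dryrun or warn only records the violation instead.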
Features
Constraint management
Custom constraint templates: users can write their own constraint templates in Rego to implement arbitrarily complex policy logic.
For example, a policy can require every Namespace to carry specific labels, or restrict certain namespaces to a specific set of images.
Template reuse: a constraint template can back multiple constraint instances, which improves maintainability and reuse.
For example, a generic label template can be instantiated in different namespaces with different required labels.
Constraint updates: when a template or constraint changes, Gatekeeper automatically re-evaluates all related resources so the policy takes effect immediately.
Resource control
Admission interception: whenever a resource is created or updated, Gatekeeper intercepts the request in real time and evaluates it against the policies. A request that violates a policy is rejected immediately with a detailed error message that helps users locate the problem quickly.
Create/update restrictions: Gatekeeper can block resource creation and update requests that do not comply with a policy.
For example, if a policy requires every Deployment to set resource requests and limits, any attempt to create or update a Deployment without them will be rejected.
This behaviour is controlled with enforcementAction, one of: dryrun | deny | warn
check https://open-policy-agent.github.io/gatekeeper-library/website/validation/containerlimits
Resource type filtering: the constraint's match field specifies which resource kinds and namespaces a policy applies to.
For example, a policy can target only Pods in specific namespaces, or only resources of a particular API group and version.
A sync configuration (syncSet) can be used to tell Gatekeeper which resources to replicate into OPA and which to ignore; a sketch follows.
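A minimal sketch of such a sync configuration using the Config resource (the kinds listed below are only an illustration; referential constraints can only see resources that are replicated this way):
kubectl apply -f - <<EOF
apiVersion: config.gatekeeper.sh/v1alpha1
kind: Config
metadata:
  name: config
  namespace: gatekeeper-system
spec:
  sync:
    syncOnly:
      - group: ""
        version: "v1"
        kind: "Namespace"
      - group: ""
        version: "v1"
        kind: "Pod"
EOF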
Compliance assurance
Industry standards and custom rules: Gatekeeper can ensure that resources in the cluster comply with industry standards and with the internal security rules required by administrators.
For example, a policy can require every container to run with up-to-date security patches, or require every volume to be encrypted.
The Gatekeeper library already ships close to 50 ready-made constraint policies for all kinds of resource restrictions; browse and download them at https://open-policy-agent.github.io/gatekeeper-library/website/
Audit and reporting: Gatekeeper records every policy evaluation result so that administrators can audit and report on it. The audit log shows which resources violated which policies.
Audit export: audit results can be exported and consumed by downstream systems.
See https://open-policy-agent.github.io/gatekeeper/website/docs/pubsub/ for details.
Installation
kubectl apply -f https://raw.githubusercontent.com/open-policy-agent/gatekeeper/v3.18.2/deploy/gatekeeper.yaml
helm repo add gatekeeper https://open-policy-agent.github.io/gatekeeper/charts
helm install gatekeeper/gatekeeper --name-template=gatekeeper --namespace gatekeeper-system --create-namespace
Make sure that:
- You have Docker version 20.10 or later installed.
- Your kubectl context is set to the desired installation cluster.
- You have a container registry you can write to that is readable by the target cluster.
git clone https://github.com/open-policy-agent/gatekeeper.git \
&& cd gatekeeper
- Build and push Gatekeeper image:
export DESTINATION_GATEKEEPER_IMAGE=<add registry like "myregistry.docker.io/gatekeeper">
make docker-buildx REPOSITORY=$DESTINATION_GATEKEEPER_IMAGE OUTPUT_TYPE=type=registry
- And then deploy:
make deploy REPOSITORY=$DESTINATION_GATEKEEPER_IMAGE
Binary
Subsections of Binary
Argo Workflow Binary
MIRROR="files.m.daocloud.io/"
VERSION=v3.5.4
curl -sSLo argo-linux-amd64.gz "https://${MIRROR}github.com/argoproj/argo-workflows/releases/download/${VERSION}/argo-linux-amd64.gz"
gunzip argo-linux-amd64.gz
chmod u+x argo-linux-amd64
mkdir -p ${HOME}/bin
mv -f argo-linux-amd64 ${HOME}/bin/argo
rm -f argo-linux-amd64.gz
ArgoCD Binary
MIRROR="files.m.daocloud.io/"
VERSION=v3.1.8
[ $(uname -m) = x86_64 ] && curl -sSLo argocd "https://${MIRROR}github.com/argoproj/argo-cd/releases/download/${VERSION}/argocd-linux-amd64"
[ $(uname -m) = aarch64 ] && curl -sSLo argocd "https://${MIRROR}github.com/argoproj/argo-cd/releases/download/${VERSION}/argocd-linux-arm64"
chmod u+x argocd
mkdir -p ${HOME}/bin
mv -f argocd ${HOME}/bin
[Optional] add to PATH
cat >> ~/.bashrc << EOF
export PATH=$PATH:/root/bin
EOF
source ~/.bashrc
Golang Binary
# sudo rm -rf /usr/local/go # remove the old version first
wget https://go.dev/dl/go1.24.4.linux-amd64.tar.gz
tar -C /usr/local -xzf go1.24.4.linux-amd64.tar.gz
echo 'export PATH=$PATH:/usr/local/go/bin' >> ~/.bashrc
source ~/.bashrc
rm -rf ./go1.24.4.linux-amd64.tar.gz
Gradle Binary
VERSION=8.7 # example version; adjust as needed
curl -sSLo gradle-${VERSION}-bin.zip "https://services.gradle.org/distributions/gradle-${VERSION}-bin.zip"
mkdir -p /opt/gradle
unzip -d /opt/gradle gradle-${VERSION}-bin.zip
mkdir -p ${HOME}/bin
ln -sfn /opt/gradle/gradle-${VERSION}/bin/gradle ${HOME}/bin/gradle
rm -f gradle-${VERSION}-bin.zip
Helm Binary
ARCH_IN_FILE_NAME=linux-amd64
FILE_NAME=helm-v3.18.3-${ARCH_IN_FILE_NAME}.tar.gz
curl -sSLo ${FILE_NAME} "https://files.m.daocloud.io/get.helm.sh/${FILE_NAME}"
tar zxf ${FILE_NAME}
mkdir -p ${HOME}/bin
mv -f ${ARCH_IN_FILE_NAME}/helm ${HOME}/bin
rm -rf ./${FILE_NAME}
rm -rf ./${ARCH_IN_FILE_NAME}
chmod u+x ${HOME}/bin/helm
JQ Binary
JQ_VERSION=1.7
JQ_BINARY=jq-linux-amd64
wget "https://github.com/jqlang/jq/releases/download/jq-${JQ_VERSION}/${JQ_BINARY}" -O /usr/bin/jq && chmod +x /usr/bin/jq
Kind Binary
MIRROR="files.m.daocloud.io/"
VERSION=v0.29.0
[ $(uname -m) = x86_64 ] && curl -sSLo kind "https://${MIRROR}github.com/kubernetes-sigs/kind/releases/download/${VERSION}/kind-linux-amd64"
[ $(uname -m) = aarch64 ] && curl -sSLo kind "https://${MIRROR}github.com/kubernetes-sigs/kind/releases/download/${VERSION}/kind-linux-arm64"
chmod u+x kind
mkdir -p ${HOME}/bin
mv -f kind ${HOME}/bin
Krew Binary
cd "$(mktemp -d)" &&
OS="$(uname | tr '[:upper:]' '[:lower:]')" &&
ARCH="$(uname -m | sed -e 's/x86_64/amd64/' -e 's/\(arm\)\(64\)\?.*/\1\2/' -e 's/aarch64$/arm64/')" &&
KREW="krew-${OS}_${ARCH}" &&
curl -fsSLO "https://github.com/kubernetes-sigs/krew/releases/latest/download/${KREW}.tar.gz" &&
tar zxvf "${KREW}.tar.gz" &&
./"${KREW}" install krewKubectl Binary
Kubectl Binary
MIRROR="files.m.daocloud.io/"
VERSION=$(curl -L -s https://${MIRROR}dl.k8s.io/release/stable.txt)
[ $(uname -m) = x86_64 ] && curl -sSLo kubectl "https://${MIRROR}dl.k8s.io/release/${VERSION}/bin/linux/amd64/kubectl"
[ $(uname -m) = aarch64 ] && curl -sSLo kubectl "https://${MIRROR}dl.k8s.io/release/${VERSION}/bin/linux/arm64/kubectl"
chmod u+x kubectl
mkdir -p ${HOME}/bin
mv -f kubectl ${HOME}/bin
Kustomize Binary
MIRROR="github.com"
VERSION="v5.7.1"
[ $(uname -m) = x86_64 ] && curl -sSLo kustomize "https:///${MIRROR}/kubernetes-sigs/kustomize/releases/download/kustomize/${VERSION}/kustomize_${VERSION}_linux_amd64.tar.gz"
[ $(uname -m) = aarch64 ] && curl -sSLo kustomize "https:///${MIRROR}/kubernetes-sigs/kustomize/releases/download/kustomize/${VERSION}/kustomize_${VERSION}_linux_arm64.tar.gz"
chmod u+x kustomize
mkdir -p ${HOME}/bin
mv -f kustomize ${HOME}/binMaven Binary
wget https://dlcdn.apache.org/maven/maven-3/3.9.6/binaries/apache-maven-3.9.6-bin.tar.gz
tar xzf apache-maven-3.9.6-bin.tar.gz -C /usr/local
ln -sfn /usr/local/apache-maven-3.9.6/bin/mvn /root/bin/mvn
echo 'export PATH=$PATH:/usr/local/apache-maven-3.9.6/bin' >> ~/.bashrc
source ~/.bashrc
Minikube Binary
MIRROR="files.m.daocloud.io/"
[ $(uname -m) = x86_64 ] && curl -sSLo minikube "https://${MIRROR}storage.googleapis.com/minikube/releases/latest/minikube-linux-amd64"
[ $(uname -m) = aarch64 ] && curl -sSLo minikube "https://${MIRROR}storage.googleapis.com/minikube/releases/latest/minikube-linux-arm64"
chmod u+x minikube
mkdir -p ${HOME}/bin
mv -f minikube ${HOME}/bin
Open Java
mkdir -p /etc/apt/keyrings && \
wget -qO - https://packages.adoptium.net/artifactory/api/gpg/key/public | gpg --dearmor -o /etc/apt/keyrings/adoptium.gpg && \
echo "deb [signed-by=/etc/apt/keyrings/adoptium.gpg arch=amd64] https://packages.adoptium.net/artifactory/deb $(awk -F= '/^VERSION_CODENAME/{print$2}' /etc/os-release) main" | tee /etc/apt/sources.list.d/adoptium.list > /dev/null && \
apt-get update && \
apt-get install -y temurin-21-jdk && \
apt-get clean && \
rm -rf /var/lib/apt/lists/*
YQ Binary
YQ_VERSION=v4.40.5
YQ_BINARY=yq_linux_amd64
wget https://github.com/mikefarah/yq/releases/download/${YQ_VERSION}/${YQ_BINARY}.tar.gz -O - | tar xz && mv ${YQ_BINARY} /usr/bin/yq
CICD
Articles
FAQ
Subsections of CICD
Install Argo CD
Preliminary
1. install argoCD binary
2. install components
crds:
install: true
keep: false
global:
domain: argo-cd.ay.dev
revisionHistoryLimit: 3
image:
repository: m.daocloud.io/quay.io/argoproj/argocd
imagePullPolicy: IfNotPresent
redis:
enabled: true
image:
repository: m.daocloud.io/docker.io/library/redis
exporter:
enabled: false
image:
repository: m.daocloud.io/bitnami/redis-exporter
metrics:
enabled: false
redis-ha:
enabled: false
image:
repository: m.daocloud.io/docker.io/library/redis
configmapTest:
repository: m.daocloud.io/docker.io/koalaman/shellcheck
haproxy:
enabled: false
image:
repository: m.daocloud.io/docker.io/library/haproxy
exporter:
enabled: false
image: m.daocloud.io/docker.io/oliver006/redis_exporter
dex:
enabled: true
image:
repository: m.daocloud.io/ghcr.io/dexidp/dex
server:
ingress:
enabled: true
ingressClassName: nginx
annotations:
nginx.ingress.kubernetes.io/ssl-passthrough: "true"
cert-manager.io/cluster-issuer: self-signed-ca-issuer
nginx.ingress.kubernetes.io/backend-protocol: HTTPS
hostname: argo-cd.ay.dev
path: /
pathType: Prefix
tls: true
helm upgrade --install argo-cd argo-cd \
--namespace argocd \
--create-namespace \
--version 8.3.5 \
--repo https://aaronyang0628.github.io/helm-chart-mirror/charts \
--values argocd.values.yaml \
--atomic
helm install argo-cd argo-cd \
--namespace argocd \
--create-namespace \
--version 8.3.5 \
--repo https://argoproj.github.io/argo-helm \
--values argocd.values.yaml \
--atomic
by default you can install argocd with the official manifest from this link
kubectl create namespace argocd \
&& kubectl apply -n argocd -f https://raw.githubusercontent.com/argoproj/argo-cd/stable/manifests/install.yaml
4. prepare argocd-server-external.yaml
kubectl -n argocd apply -f - <<EOF
apiVersion: v1
kind: Service
metadata:
labels:
app.kubernetes.io/component: server
app.kubernetes.io/instance: argo-cd
app.kubernetes.io/name: argocd-server-external
app.kubernetes.io/part-of: argocd
name: argocd-server-external
spec:
ports:
- name: https
port: 443
protocol: TCP
targetPort: 8080
nodePort: 30443
selector:
app.kubernetes.io/instance: argo-cd
app.kubernetes.io/name: argocd-server
type: NodePort
EOF
kubectl -n argocd apply -f - <<EOF
apiVersion: v1
kind: Service
metadata:
labels:
app.kubernetes.io/component: server
app.kubernetes.io/instance: argo-cd
app.kubernetes.io/name: argocd-server-external
app.kubernetes.io/part-of: argocd
app.kubernetes.io/version: v2.8.4
name: argocd-server-external
spec:
ports:
- name: https
port: 443
protocol: TCP
targetPort: 8080
nodePort: 30443
selector:
app.kubernetes.io/instance: argo-cd
app.kubernetes.io/name: argocd-server
type: NodePort
EOF
5. create external service
kubectl -n argocd apply -f argocd-server-external.yaml
6. [Optional] prepare argocd-server-ingress.yaml
Before you create the ingress, cert-manager and a cluster issuer named self-signed-ca-issuer must already exist; if not, please check 🔗link
kubectl -n argocd apply -f - <<EOF
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
annotations:
cert-manager.io/cluster-issuer: self-signed-ca-issuer
nginx.ingress.kubernetes.io/backend-protocol: HTTPS
nginx.ingress.kubernetes.io/ssl-passthrough: "true"
name: argo-cd-argocd-server
namespace: argocd
spec:
ingressClassName: nginx
rules:
- host: argo-cd.ay.dev
http:
paths:
- backend:
service:
name: argo-cd-argocd-server
port:
number: 443
path: /
pathType: Prefix
tls:
- hosts:
- argo-cd.ay.dev
secretName: argo-cd.ay.dev-tls
EOF
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
annotations:
cert-manager.io/cluster-issuer: self-signed-ca-issuer
nginx.ingress.kubernetes.io/backend-protocol: HTTPS
nginx.ingress.kubernetes.io/ssl-passthrough: "true"
name: argo-cd-argocd-server
namespace: argocd
spec:
ingressClassName: nginx
rules:
- host: argo-cd.ay.dev
http:
paths:
- backend:
service:
name: argo-cd-argocd-server
port:
number: 443
path: /
pathType: Prefix
tls:
- hosts:
- argo-cd.ay.dev
secretName: argo-cd.ay.dev-tls
7. [Optional] apply the ingress
kubectl -n argocd apply -f argocd-server-ingress.yaml
8. get argocd initialized password
kubectl -n argocd get secret argocd-initial-admin-secret -o jsonpath="{.data.password}" | base64 -d
9. login argocd
ARGOCD_PASS=$(kubectl -n argocd get secret argocd-initial-admin-secret -o jsonpath="{.data.password}" | base64 -d)
MASTER_IP=$(kubectl get nodes --selector=node-role.kubernetes.io/control-plane -o jsonpath='{$.items[0].status.addresses[?(@.type=="InternalIP")].address}')
argocd login --insecure --username admin $MASTER_IP:30443 --password $ARGOCD_PASS
if you deploy argocd in minikube, you might need to forward this port
ssh -i ~/.minikube/machines/minikube/id_rsa docker@$(minikube ip) -L '*:30443:0.0.0.0:30443' -N -f
open https://$(minikube ip):30443
if you use ingress, you might need to configure your browser to allow insecure connections
kubectl -n basic-components get secret root-secret -o jsonpath='{.data.tls\.crt}' | base64 -d > cert-manager-self-signed-ca-secret.crt
open https://argo-cd.ay.dev
Install Argo WorkFlow
Preliminary
- Kubernetes has been installed; if not, check 🔗link
- Argo CD has been installed; if not, check 🔗link
- cert-manager has been installed and a cluster issuer named
self-signed-ca-issuer
exists; if not, check 🔗link
0. create workflow related namespace
kubectl get namespace business-workflows > /dev/null 2>&1 || kubectl create namespace business-workflows
1. prepare argo-workflows.yaml
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: argo-workflows
spec:
syncPolicy:
syncOptions:
- CreateNamespace=true
project: default
source:
repoURL: https://argoproj.github.io/argo-helm
chart: argo-workflows
targetRevision: 0.45.27
helm:
releaseName: argo-workflows
values: |
crds:
install: true
keep: false
singleNamespace: false
controller:
image:
registry: m.daocloud.io/quay.io
workflowNamespaces:
- business-workflows
executor:
image:
registry: m.daocloud.io/quay.io
workflow:
serviceAccount:
create: true
rbac:
create: true
server:
enabled: true
image:
registry: m.daocloud.io/quay.io
ingress:
enabled: true
ingressClassName: nginx
annotations:
cert-manager.io/cluster-issuer: self-signed-ca-issuer
nginx.ingress.kubernetes.io/rewrite-target: /$1
nginx.ingress.kubernetes.io/use-regex: "true"
hosts:
- argo-workflows.ay.dev
paths:
- /?(.*)
pathType: ImplementationSpecific
tls:
- secretName: argo-workflows.ay.dev-tls
hosts:
- argo-workflows.ay.dev
authModes:
- server
- client
sso:
enabled: false
destination:
server: https://kubernetes.default.svc
namespace: workflows
kubectl -n argocd apply -f - << EOF
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: argo-workflows
spec:
syncPolicy:
syncOptions:
- CreateNamespace=true
project: default
source:
repoURL: https://argoproj.github.io/argo-helm
chart: argo-workflows
targetRevision: 0.45.27
helm:
releaseName: argo-workflows
values: |
crds:
install: true
keep: false
singleNamespace: false
controller:
image:
registry: m.daocloud.io/quay.io
workflowNamespaces:
- business-workflows
executor:
image:
registry: m.daocloud.io/quay.io
workflow:
serviceAccount:
create: true
rbac:
create: true
server:
enabled: true
image:
registry: m.daocloud.io/quay.io
ingress:
enabled: true
ingressClassName: nginx
annotations:
cert-manager.io/cluster-issuer: self-signed-ca-issuer
nginx.ingress.kubernetes.io/rewrite-target: /$1
nginx.ingress.kubernetes.io/use-regex: "true"
hosts:
- argo-workflows.ay.dev
paths:
- /?(.*)
pathType: ImplementationSpecific
tls:
- secretName: argo-workflows.ay.dev-tls
hosts:
- argo-workflows.ay.dev
authModes:
- server
- client
sso:
enabled: false
destination:
server: https://kubernetes.default.svc
namespace: workflows
EOF
2. install argo workflow binary
3. [Optional] apply to k8s
kubectl -n argocd apply -f argo-workflows.yaml
4. sync by argocd
argocd app sync argocd/argo-workflows
5. submit a test workflow
argo -n business-workflows submit https://raw.githubusercontent.com/argoproj/argo-workflows/master/examples/hello-world.yaml --serviceaccount=argo-workflow
6. check workflow status
# list all flows
argo -n business-workflows list
# get specific flow status
argo -n business-workflows get <$flow_name>
# get specific flow log
argo -n business-workflows logs <$flow_name>
# get specific flow log continuously
argo -n business-workflows logs <$flow_name> --watch
Install Argo Event
Preliminary
1. prepare argo-events.yaml
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: argo-events
spec:
syncPolicy:
syncOptions:
- CreateNamespace=true
project: default
source:
repoURL: https://argoproj.github.io/argo-helm
chart: argo-events
targetRevision: 2.4.2
helm:
releaseName: argo-events
values: |
openshift: false
createAggregateRoles: true
crds:
install: true
keep: true
global:
image:
repository: m.daocloud.io/quay.io/argoproj/argo-events
controller:
replicas: 1
resources: {}
webhook:
enabled: true
replicas: 1
port: 12000
resources: {}
extraObjects:
- apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
annotations:
cert-manager.io/cluster-issuer: self-signed-ca-issuer
nginx.ingress.kubernetes.io/rewrite-target: /$1
labels:
app.kubernetes.io/instance: argo-events
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: argo-events-events-webhook
app.kubernetes.io/part-of: argo-events
argocd.argoproj.io/instance: argo-events
name: argo-events-webhook
spec:
ingressClassName: nginx
rules:
- host: argo-events.webhook.ay.dev
http:
paths:
- backend:
service:
name: events-webhook
port:
number: 12000
path: /?(.*)
pathType: ImplementationSpecific
tls:
- hosts:
- argo-events.webhook.ay.dev
secretName: argo-events-webhook-tls
destination:
server: https://kubernetes.default.svc
namespace: argocd
2. apply to k8s
kubectl -n argocd apply -f argo-events.yaml
3. sync by argocd
argocd app sync argocd/argo-events
Container
Articles
FAQ
Subsections of Container
Install Buildah
Reference
- you can directly install Buildah from the 🐶buildah official website.
Prerequisites
Kernel Version Requirements To run Buildah on Red Hat Enterprise Linux or CentOS, version 7.4 or higher is required. On other Linux distributions Buildah requires a kernel version that supports the OverlayFS and/or fuse-overlayfs filesystem – you’ll need to consult your distribution’s documentation to determine a minimum version number.
runc Requirement Buildah uses runc to run commands when buildah run is used, or when buildah build encounters a RUN instruction, so you’ll also need to build and install a compatible version of runc for Buildah to call for those cases. If Buildah is installed via a package manager such as yum, dnf or apt-get, runc will be installed as part of that process.
CNI Requirement When Buildah uses runc to run commands, it defaults to running those commands in the host’s network namespace. If the command is being run in a separate user namespace, though, for example when ID mapping is used, then the command will also be run in a separate network namespace.
A newly-created network namespace starts with no network interfaces, so commands which are run in that namespace are effectively disconnected from the network unless additional setup is done. Buildah relies on the CNI library and plugins to set up interfaces and routing for network namespaces.
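A quick way to check these prerequisites on an existing host (a minimal sketch; exact package names vary by distribution):
uname -r                          # kernel version; needs OverlayFS and/or fuse-overlayfs support
grep -w overlay /proc/filesystems # confirm the overlay filesystem is available
runc --version                    # runc is normally pulled in automatically by the package manager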
Installation
Caution
If apt update is already failing, please check the following 🔗link; adding the Docker source won't solve that problem.
sudo dnf update -y
sudo dnf -y install buildah
Once the installation is complete, the buildah images command will list all the images:
buildah images
sudo yum -y install buildah
Once the installation is complete, the buildah images command will list all the images:
buildah images
- Install Buildah from the distribution's apt repository.
sudo apt-get -y update
sudo apt-get -y install buildah
- Verify that the installation is successful:
sudo buildah version
Info
- Docker Image saved in
/var/lib/docker
Mirror
You can modify /etc/docker/daemon.json
{
"registry-mirrors": ["<$mirror_url>"]
}
for example:
https://docker.mirrors.ustc.edu.cn
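A filled-in sketch of that file using the mirror above; restarting the Docker daemon afterwards is required for the change to take effect:
sudo tee /etc/docker/daemon.json > /dev/null << 'EOF'
{
  "registry-mirrors": ["https://docker.mirrors.ustc.edu.cn"]
}
EOF
sudo systemctl restart docker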
Install Docker
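A minimal sketch using Docker's upstream convenience script (it detects the distribution automatically; review the script before running it):
curl -fsSL https://get.docker.com -o get-docker.sh
sudo sh get-docker.sh
sudo systemctl enable --now docker
sudo docker run hello-world   # verify the installation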
Install Podman
Reference
- you can directly install Podman from the podman official website.
Installation
Caution
If apt update is already failing, please check the following 🔗link; adding the Docker source won't solve that problem.
sudo dnf update -y
sudo dnf -y install podman
sudo yum install -y podman
sudo apt-get -y install podman
Run Params
start a container
podman run [params]
--rm: remove the container automatically when it exits
-v: mount a volume
Example
podman run --rm\
-v /root/kserve/iris-input.json:/tmp/iris-input.json \
--privileged \
-e MODEL_NAME=sklearn-iris \
-e INPUT_PATH=/tmp/iris-input.json \
-e SERVICE_HOSTNAME=sklearn-iris.kserve-test.example.com \
-it m.daocloud.io/docker.io/library/golang:1.22 sh -c "command A; command B; exec bash"
Database
Subsections of Database
Install Clickhouse
Installation
Preliminary
1. Kubernetes has been installed; if not, check 🔗link
2. argoCD has been installed; if not, check 🔗link
3. cert-manager has been installed and a cluster issuer named `self-signed-ca-issuer` exists; if not, check 🔗link
1.prepare admin credentials secret
Details
kubectl get namespaces database > /dev/null 2>&1 || kubectl create namespace database
kubectl -n database create secret generic clickhouse-admin-credentials \
--from-literal=password=$(tr -dc A-Za-z0-9 </dev/urandom | head -c 16)
2.prepare `deploy-clickhouse.yaml`
Details
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: clickhouse
spec:
syncPolicy:
syncOptions:
- CreateNamespace=true
project: default
source:
repoURL: https://charts.bitnami.com/bitnami
chart: clickhouse
targetRevision: 4.5.1
helm:
releaseName: clickhouse
values: |
serviceAccount:
name: clickhouse
image:
registry: m.daocloud.io/docker.io
pullPolicy: IfNotPresent
volumePermissions:
enabled: false
image:
registry: m.daocloud.io/docker.io
pullPolicy: IfNotPresent
zookeeper:
enabled: true
image:
registry: m.daocloud.io/docker.io
pullPolicy: IfNotPresent
replicaCount: 3
persistence:
enabled: true
storageClass: nfs-external
size: 8Gi
volumePermissions:
enabled: false
image:
registry: m.daocloud.io/docker.io
pullPolicy: IfNotPresent
shards: 2
replicaCount: 3
ingress:
enabled: true
annotations:
cert-manager.io/cluster-issuer: self-signed-ca-issuer
nginx.ingress.kubernetes.io/rewrite-target: /$1
hostname: clickhouse.dev.geekcity.tech
ingressClassName: nginx
path: /?(.*)
tls: true
persistence:
enabled: false
resources:
requests:
cpu: 2
memory: 512Mi
limits:
cpu: 3
memory: 1024Mi
auth:
username: admin
existingSecret: clickhouse-admin-credentials
existingSecretKey: password
metrics:
enabled: true
image:
registry: m.daocloud.io/docker.io
pullPolicy: IfNotPresent
serviceMonitor:
enabled: true
namespace: monitor
jobLabel: clickhouse
selector:
app.kubernetes.io/name: clickhouse
app.kubernetes.io/instance: clickhouse
labels:
release: prometheus-stack
extraDeploy:
- |
apiVersion: apps/v1
kind: Deployment
metadata:
name: clickhouse-tool
namespace: database
labels:
app.kubernetes.io/name: clickhouse-tool
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: clickhouse-tool
template:
metadata:
labels:
app.kubernetes.io/name: clickhouse-tool
spec:
containers:
- name: clickhouse-tool
image: m.daocloud.io/docker.io/clickhouse/clickhouse-server:23.11.5.29-alpine
imagePullPolicy: IfNotPresent
env:
- name: CLICKHOUSE_USER
value: admin
- name: CLICKHOUSE_PASSWORD
valueFrom:
secretKeyRef:
key: password
name: clickhouse-admin-credentials
- name: CLICKHOUSE_HOST
value: csst-clickhouse.csst
- name: CLICKHOUSE_PORT
value: "9000"
- name: TZ
value: Asia/Shanghai
command:
- tail
args:
- -f
- /etc/hosts
destination:
server: https://kubernetes.default.svc
namespace: database
3.deploy clickhouse
Details
kubectl -n argocd apply -f deploy-clickhouse.yaml
4.sync by argocd
Details
argocd app sync argocd/clickhouse
5.prepare `clickhouse-interface.yaml`
Details
apiVersion: v1
kind: Service
metadata:
labels:
app.kubernetes.io/component: clickhouse
app.kubernetes.io/instance: clickhouse
name: clickhouse-interface
spec:
ports:
- name: http
port: 8123
protocol: TCP
targetPort: http
nodePort: 31567
- name: tcp
port: 9000
protocol: TCP
targetPort: tcp
nodePort: 32005
selector:
app.kubernetes.io/component: clickhouse
app.kubernetes.io/instance: clickhouse
app.kubernetes.io/name: clickhouse
type: NodePort
6.apply to k8s
Details
kubectl -n database apply -f clickhouse-interface.yaml
7.extract clickhouse admin credentials
Details
kubectl -n database get secret clickhouse-admin-credentials -o jsonpath='{.data.password}' | base64 -d
8.invoke http api
Details
add `$K8S_MASTER_IP clickhouse.dev.geekcity.tech` to **/etc/hosts**
CK_PASS=$(kubectl -n database get secret clickhouse-admin-credentials -o jsonpath='{.data.password}' | base64 -d)
echo 'SELECT version()' | curl -k "https://admin:${CK_PASS}@clickhouse.dev.geekcity.tech:32443/" --data-binary @-
Preliminary
1. Docker has been installed; if not, check 🔗link
Using Proxy
you can run an additional daocloud image to accelerate your image pulls; check Daocloud Proxy
1.init server
Details
mkdir -p clickhouse/{data,logs}
podman run --rm \
--ulimit nofile=262144:262144 \
--name clickhouse-server \
-p 18123:8123 \
-p 19000:9000 \
-v $(pwd)/clickhouse/data:/var/lib/clickhouse \
-v $(pwd)/clickhouse/logs:/var/log/clickhouse-server \
-e CLICKHOUSE_DB=my_database \
-e CLICKHOUSE_DEFAULT_ACCESS_MANAGEMENT=1 \
-e CLICKHOUSE_USER=ayayay \
-e CLICKHOUSE_PASSWORD=123456 \
-d m.daocloud.io/docker.io/clickhouse/clickhouse-server:23.11.5.29-alpine
2.check dashboard
And then you can visit 🔗http://localhost:18123
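For a quick test against the HTTP interface, using the user and password set in the podman command above:
echo 'SELECT version()' | curl 'http://ayayay:123456@localhost:18123/' --data-binary @-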
3.use cli api
And then you can visit 🔗http://localhost:19000
Details
podman run --rm \
--entrypoint clickhouse-client \
-it m.daocloud.io/docker.io/clickhouse/clickhouse-server:23.11.5.29-alpine \
--host host.containers.internal \
--port 19000 \
--user ayayay \
--password 123456 \
--query "select version()"4.use visual client
Details
podman run --rm -p 8080:80 -d m.daocloud.io/docker.io/spoonest/clickhouse-tabix-web-client:stable
Preliminary
1. Kubernetes has been installed; if not, check 🔗link
2. ArgoCD has been installed; if not, check 🔗link
3. Argo Workflow has been installed; if not, check 🔗link
1.prepare `argocd-login-credentials`
Details
kubectl get namespaces database > /dev/null 2>&1 || kubectl create namespace database
kubectl -n database create secret generic mariadb-credentials \
--from-literal=mariadb-root-password=$(tr -dc A-Za-z0-9 </dev/urandom | head -c 16) \
--from-literal=mariadb-replication-password=$(tr -dc A-Za-z0-9 </dev/urandom | head -c 16) \
--from-literal=mariadb-password=$(tr -dc A-Za-z0-9 </dev/urandom | head -c 16)
2.apply rolebinding to k8s
Details
kubectl apply -f - <<EOF
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: application-administrator
rules:
- apiGroups:
- argoproj.io
resources:
- applications
verbs:
- '*'
- apiGroups:
- apps
resources:
- deployments
verbs:
- '*'
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: application-administration
namespace: argocd
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: application-administrator
subjects:
- kind: ServiceAccount
name: argo-workflow
namespace: business-workflows
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: application-administration
namespace: application
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: application-administrator
subjects:
- kind: ServiceAccount
name: argo-workflow
namespace: business-workflows
EOF
4.prepare clickhouse admin credentials secret
Details
kubectl get namespace application > /dev/null 2>&1 || kubectl create namespace application
kubectl -n application create secret generic clickhouse-admin-credentials \
--from-literal=password=$(tr -dc A-Za-z0-9 </dev/urandom | head -c 16)
5.prepare deploy-clickhouse-flow.yaml
Details
apiVersion: argoproj.io/v1alpha1
kind: Workflow
metadata:
generateName: deploy-argocd-app-ck-
spec:
entrypoint: entry
artifactRepositoryRef:
configmap: artifact-repositories
key: default-artifact-repository
serviceAccountName: argo-workflow
templates:
- name: entry
inputs:
parameters:
- name: argocd-server
value: argo-cd-argocd-server.argocd:443
- name: insecure-option
value: --insecure
dag:
tasks:
- name: apply
template: apply
- name: prepare-argocd-binary
template: prepare-argocd-binary
dependencies:
- apply
- name: sync
dependencies:
- prepare-argocd-binary
template: sync
arguments:
artifacts:
- name: argocd-binary
from: "{{tasks.prepare-argocd-binary.outputs.artifacts.argocd-binary}}"
parameters:
- name: argocd-server
value: "{{inputs.parameters.argocd-server}}"
- name: insecure-option
value: "{{inputs.parameters.insecure-option}}"
- name: wait
dependencies:
- sync
template: wait
arguments:
artifacts:
- name: argocd-binary
from: "{{tasks.prepare-argocd-binary.outputs.artifacts.argocd-binary}}"
parameters:
- name: argocd-server
value: "{{inputs.parameters.argocd-server}}"
- name: insecure-option
value: "{{inputs.parameters.insecure-option}}"
- name: apply
resource:
action: apply
manifest: |
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: app-clickhouse
namespace: argocd
spec:
syncPolicy:
syncOptions:
- CreateNamespace=true
project: default
source:
repoURL: https://charts.bitnami.com/bitnami
chart: clickhouse
targetRevision: 4.5.3
helm:
releaseName: app-clickhouse
values: |
image:
registry: docker.io
repository: bitnami/clickhouse
tag: 23.12.3-debian-11-r0
pullPolicy: IfNotPresent
service:
type: ClusterIP
volumePermissions:
enabled: false
image:
registry: m.daocloud.io/docker.io
pullPolicy: IfNotPresent
ingress:
enabled: true
ingressClassName: nginx
annotations:
cert-manager.io/cluster-issuer: self-signed-ca-issuer
nginx.ingress.kubernetes.io/rewrite-target: /$1
path: /?(.*)
hostname: clickhouse.dev.geekcity.tech
tls: true
shards: 2
replicaCount: 3
persistence:
enabled: false
auth:
username: admin
existingSecret: clickhouse-admin-credentials
existingSecretKey: password
zookeeper:
enabled: true
image:
registry: m.daocloud.io/docker.io
repository: bitnami/zookeeper
tag: 3.8.3-debian-11-r8
pullPolicy: IfNotPresent
replicaCount: 3
persistence:
enabled: false
volumePermissions:
enabled: false
image:
registry: m.daocloud.io/docker.io
pullPolicy: IfNotPresent
destination:
server: https://kubernetes.default.svc
namespace: application
- name: prepare-argocd-binary
inputs:
artifacts:
- name: argocd-binary
path: /tmp/argocd
mode: 755
http:
url: https://files.m.daocloud.io/github.com/argoproj/argo-cd/releases/download/v2.9.3/argocd-linux-amd64
outputs:
artifacts:
- name: argocd-binary
path: "{{inputs.artifacts.argocd-binary.path}}"
container:
image: m.daocloud.io/docker.io/library/fedora:39
command:
- sh
- -c
args:
- |
ls -l {{inputs.artifacts.argocd-binary.path}}
- name: sync
inputs:
artifacts:
- name: argocd-binary
path: /usr/local/bin/argocd
parameters:
- name: argocd-server
- name: insecure-option
value: ""
container:
image: m.daocloud.io/docker.io/library/fedora:39
env:
- name: ARGOCD_USERNAME
valueFrom:
secretKeyRef:
name: argocd-login-credentials
key: username
- name: ARGOCD_PASSWORD
valueFrom:
secretKeyRef:
name: argocd-login-credentials
key: password
- name: WITH_PRUNE_OPTION
value: --prune
command:
- sh
- -c
args:
- |
set -e
export ARGOCD_SERVER={{inputs.parameters.argocd-server}}
export INSECURE_OPTION={{inputs.parameters.insecure-option}}
export ARGOCD_USERNAME=${ARGOCD_USERNAME:-admin}
argocd login ${INSECURE_OPTION} --username ${ARGOCD_USERNAME} --password ${ARGOCD_PASSWORD} ${ARGOCD_SERVER}
argocd app sync argocd/app-clickhouse ${WITH_PRUNE_OPTION} --timeout 300
- name: wait
inputs:
artifacts:
- name: argocd-binary
path: /usr/local/bin/argocd
parameters:
- name: argocd-server
- name: insecure-option
value: ""
container:
image: m.daocloud.io/docker.io/library/fedora:39
env:
- name: ARGOCD_USERNAME
valueFrom:
secretKeyRef:
name: argocd-login-credentials
key: username
- name: ARGOCD_PASSWORD
valueFrom:
secretKeyRef:
name: argocd-login-credentials
key: password
command:
- sh
- -c
args:
- |
set -e
export ARGOCD_SERVER={{inputs.parameters.argocd-server}}
export INSECURE_OPTION={{inputs.parameters.insecure-option}}
export ARGOCD_USERNAME=${ARGOCD_USERNAME:-admin}
argocd login ${INSECURE_OPTION} --username ${ARGOCD_USERNAME} --password ${ARGOCD_PASSWORD} ${ARGOCD_SERVER}
argocd app wait argocd/app-clickhouse
6.submit to argo workflow client
Details
argo -n business-workflows submit deploy-clickhouse-flow.yaml
7.extract clickhouse admin credentials
Details
kubectl -n application get secret clickhouse-admin-credentials -o jsonpath='{.data.password}' | base64 -d
8.invoke http api
Details
add `$K8S_MASTER_IP clickhouse.dev.geekcity.tech` to **/etc/hosts**
CK_PASSWORD=$(kubectl -n application get secret clickhouse-admin-credentials -o jsonpath='{.data.password}' | base64 -d) && echo 'SELECT version()' | curl -k "https://admin:${CK_PASSWORD}@clickhouse.dev.geekcity.tech/" --data-binary @-
9.create external interface
Details
kubectl -n application apply -f - <<EOF
apiVersion: v1
kind: Service
metadata:
labels:
app.kubernetes.io/component: clickhouse
app.kubernetes.io/instance: app-clickhouse
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: clickhouse
app.kubernetes.io/version: 23.12.2
argocd.argoproj.io/instance: app-clickhouse
helm.sh/chart: clickhouse-4.5.3
name: app-clickhouse-service-external
spec:
ports:
- name: tcp
port: 9000
protocol: TCP
targetPort: tcp
nodePort: 30900
selector:
app.kubernetes.io/component: clickhouse
app.kubernetes.io/instance: app-clickhouse
app.kubernetes.io/name: clickhouse
type: NodePort
EOF
FAQ
Install ElasticSearch
Installation
Preliminary
1. Kubernetes has been installed; if not, check 🔗link
2. Helm has been installed; if not, check 🔗link
1.get helm repo
Details
helm repo add ay-helm-mirror https://aaronyang0628.github.io/helm-chart-mirror/charts
helm repo update
2.install chart
Details
helm install ay-helm-mirror/elasticsearch --generate-name
Using Proxy
for more information, you can check 🔗https://artifacthub.io/packages/helm/bitnami/elasticsearch
Preliminary
1. Kubernetes has been installed; if not, check 🔗link
2. Helm has been installed; if not, check 🔗link
3. ArgoCD has been installed; if not, check 🔗link
1.prepare `deploy-elasticsearch.yaml`
Details
kubectl apply -f - << EOF
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: elastic-search
spec:
syncPolicy:
syncOptions:
- CreateNamespace=true
project: default
source:
repoURL: https://charts.bitnami.com/bitnami
chart: elasticsearch
targetRevision: 19.11.3
helm:
releaseName: elastic-search
values: |
global:
kibanaEnabled: true
clusterName: elastic
image:
registry: m.zjvis.net/docker.io
pullPolicy: IfNotPresent
security:
enabled: false
service:
type: ClusterIP
ingress:
enabled: true
annotations:
cert-manager.io/cluster-issuer: self-signed-ca-issuer
nginx.ingress.kubernetes.io/rewrite-target: /$1
hostname: elastic-search.dev.tech
ingressClassName: nginx
path: /?(.*)
tls: true
master:
masterOnly: false
replicaCount: 1
persistence:
enabled: false
resources:
requests:
cpu: 2
memory: 1024Mi
limits:
cpu: 4
memory: 4096Mi
heapSize: 2g
data:
replicaCount: 0
persistence:
enabled: false
coordinating:
replicaCount: 0
ingest:
enabled: true
replicaCount: 0
service:
enabled: false
type: ClusterIP
ingress:
enabled: false
metrics:
enabled: false
image:
registry: m.zjvis.net/docker.io
pullPolicy: IfNotPresent
volumePermissions:
enabled: false
image:
registry: m.zjvis.net/docker.io
pullPolicy: IfNotPresent
sysctlImage:
enabled: true
registry: m.zjvis.net/docker.io
pullPolicy: IfNotPresent
kibana:
elasticsearch:
hosts:
- '{{ include "elasticsearch.service.name" . }}'
port: '{{ include "elasticsearch.service.ports.restAPI" . }}'
esJavaOpts: "-Xmx2g -Xms2g"
destination:
server: https://kubernetes.default.svc
namespace: application
EOF
3.sync by argocd
Details
argocd app sync argocd/elastic-search
4.extract elasticsearch admin credentials
Details
5.invoke http api
Details
add `$K8S_MASTER_IP elastic-search.dev.tech` to `/etc/hosts`
curl -k -H "Content-Type: application/json" \
-X POST "https://elastic-search.dev.tech:32443/books/_doc?pretty" \
-d '{"name": "Snow Crash", "author": "Neal Stephenson", "release_date": "1992-06-01", "page_count": 470}'
Preliminary
1. Docker|Podman|Buildah has been installed; if not, check 🔗link
Using Mirror
you can run an additional daocloud image to accelerate your image pulls; check Daocloud Proxy
1.init server
Details
Preliminary
1. Kubernetes has been installed; if not, check 🔗link
2. Helm has been installed; if not, check 🔗link
3. ArgoCD has been installed; if not, check 🔗link
4. Argo Workflow has been installed; if not, check 🔗link
1.prepare `argocd-login-credentials`
Details
kubectl get namespaces database > /dev/null 2>&1 || kubectl create namespace database
2.apply rolebinding to k8s
Details
kubectl apply -f - <<EOF
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: application-administrator
rules:
- apiGroups:
- argoproj.io
resources:
- applications
verbs:
- '*'
- apiGroups:
- apps
resources:
- deployments
verbs:
- '*'
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: application-administration
namespace: argocd
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: application-administrator
subjects:
- kind: ServiceAccount
name: argo-workflow
namespace: business-workflows
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: application-administration
namespace: application
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: application-administrator
subjects:
- kind: ServiceAccount
name: argo-workflow
namespace: business-workflows
EOF
4.prepare `deploy-xxxx-flow.yaml`
Details
6.submit to argo workflow client
Details
argo -n business-workflows submit deploy-xxxx-flow.yaml
7.decode password
Details
kubectl -n application get secret xxxx-credentials -o jsonpath='{.data.xxx-password}' | base64 -d
FAQ
Install Kafka
Installation
Preliminary
1. Kubernetes has been installed; if not, check 🔗link
2. Helm binary has been installed; if not, check 🔗link
1.get helm repo
Details
helm repo add bitnami https://charts.bitnami.com/bitnami
helm repo update
2.install chart
helm upgrade --create-namespace -n database kafka --install bitnami/kafka \
--set global.imageRegistry=m.daocloud.io/docker.io \
--set zookeeper.enabled=false \
--set controller.replicaCount=1 \
--set broker.replicaCount=1 \
--set persistence.enabled=false \
--version 28.0.3
Details
kubectl -n database \
create secret generic client-properties \
--from-literal=client.properties="$(printf "security.protocol=SASL_PLAINTEXT\nsasl.mechanism=SCRAM-SHA-256\nsasl.jaas.config=org.apache.kafka.common.security.scram.ScramLoginModule required username=\"user1\" password=\"$(kubectl get secret kafka-user-passwords --namespace database -o jsonpath='{.data.client-passwords}' | base64 -d | cut -d , -f 1)\";\n")"
Details
kubectl -n database apply -f - << EOF
apiVersion: apps/v1
kind: Deployment
metadata:
name: kafka-client-tools
labels:
app: kafka-client-tools
spec:
replicas: 1
selector:
matchLabels:
app: kafka-client-tools
template:
metadata:
labels:
app: kafka-client-tools
spec:
volumes:
- name: client-properties
secret:
secretName: client-properties
containers:
- name: kafka-client-tools
image: m.daocloud.io/docker.io/bitnami/kafka:3.6.2
volumeMounts:
- name: client-properties
mountPath: /bitnami/custom/client.properties
subPath: client.properties
readOnly: true
env:
- name: BOOTSTRAP_SERVER
value: kafka.database.svc.cluster.local:9092
- name: CLIENT_CONFIG_FILE
value: /bitnami/custom/client.properties
command:
- tail
- -f
- /etc/hosts
imagePullPolicy: IfNotPresent
EOF
3.validate function
- list topics
Details
kubectl -n database exec -it deployment/kafka-client-tools -- bash -c \
'kafka-topics.sh --bootstrap-server $BOOTSTRAP_SERVER --command-config $CLIENT_CONFIG_FILE --list'
- create topic
Details
kubectl -n database exec -it deployment/kafka-client-tools -- bash -c \
'kafka-topics.sh --bootstrap-server $BOOTSTRAP_SERVER --command-config $CLIENT_CONFIG_FILE --create --if-not-exists --topic test-topic'
- describe topic
Details
kubectl -n database exec -it deployment/kafka-client-tools -- bash -c \
'kafka-topics.sh --bootstrap-server $BOOTSTRAP_SERVER --command-config $CLIENT_CONFIG_FILE --describe --topic test-topic'
- produce messages
Details
kubectl -n database exec -it deployment/kafka-client-tools -- bash -c \
'for message in $(seq 0 10); do echo $message | kafka-console-producer.sh --bootstrap-server $BOOTSTRAP_SERVER --producer.config $CLIENT_CONFIG_FILE --topic test-topic; done'
- consume messages
Details
kubectl -n database exec -it deployment/kafka-client-tools -- bash -c \
'kafka-console-consumer.sh --bootstrap-server $BOOTSTRAP_SERVER --consumer.config $CLIENT_CONFIG_FILE --topic test-topic --from-beginning'
Preliminary
1. Kubernetes has been installed; if not, check 🔗link
2. ArgoCD has been installed; if not, check 🔗link
3. Helm binary has been installed; if not, check 🔗link
1.prepare `deploy-kafka.yaml`
kubectl -n argocd apply -f - << EOF
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: kafka
spec:
syncPolicy:
syncOptions:
- CreateNamespace=true
project: default
source:
repoURL: https://charts.bitnami.com/bitnami
chart: kafka
targetRevision: 28.0.3
helm:
releaseName: kafka
values: |
image:
registry: m.daocloud.io/docker.io
controller:
replicaCount: 1
persistence:
enabled: false
logPersistence:
enabled: false
extraConfig: |
message.max.bytes=5242880
default.replication.factor=1
offsets.topic.replication.factor=1
transaction.state.log.replication.factor=1
broker:
replicaCount: 1
persistence:
enabled: false
logPersistence:
enabled: false
extraConfig: |
message.max.bytes=5242880
default.replication.factor=1
offsets.topic.replication.factor=1
transaction.state.log.replication.factor=1
externalAccess:
enabled: false
autoDiscovery:
enabled: false
image:
registry: m.daocloud.io/docker.io
volumePermissions:
enabled: false
image:
registry: m.daocloud.io/docker.io
metrics:
kafka:
enabled: false
image:
registry: m.daocloud.io/docker.io
jmx:
enabled: false
image:
registry: m.daocloud.io/docker.io
provisioning:
enabled: false
kraft:
enabled: true
zookeeper:
enabled: false
destination:
server: https://kubernetes.default.svc
namespace: database
EOF
kubectl -n argocd apply -f - << EOF
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: kafka
spec:
syncPolicy:
syncOptions:
- CreateNamespace=true
project: default
source:
repoURL: https://charts.bitnami.com/bitnami
chart: kafka
targetRevision: 28.0.3
helm:
releaseName: kafka
values: |
image:
registry: m.daocloud.io/docker.io
listeners:
client:
protocol: PLAINTEXT
interbroker:
protocol: PLAINTEXT
controller:
replicaCount: 0
persistence:
enabled: false
logPersistence:
enabled: false
extraConfig: |
message.max.bytes=5242880
default.replication.factor=1
offsets.topic.replication.factor=1
transaction.state.log.replication.factor=1
broker:
replicaCount: 1
minId: 0
persistence:
enabled: false
logPersistence:
enabled: false
extraConfig: |
message.max.bytes=5242880
default.replication.factor=1
offsets.topic.replication.factor=1
transaction.state.log.replication.factor=1
externalAccess:
enabled: false
autoDiscovery:
enabled: false
image:
registry: m.daocloud.io/docker.io
volumePermissions:
enabled: false
image:
registry: m.daocloud.io/docker.io
metrics:
kafka:
enabled: false
image:
registry: m.daocloud.io/docker.io
jmx:
enabled: false
image:
registry: m.daocloud.io/docker.io
provisioning:
enabled: false
kraft:
enabled: false
zookeeper:
enabled: true
image:
registry: m.daocloud.io/docker.io
replicaCount: 1
auth:
client:
enabled: false
quorum:
enabled: false
persistence:
enabled: false
volumePermissions:
enabled: false
image:
registry: m.daocloud.io/docker.io
metrics:
enabled: false
tls:
client:
enabled: false
quorum:
enabled: false
destination:
server: https://kubernetes.default.svc
namespace: database
EOF
2.sync by argocd
Details
argocd app sync argocd/kafka
3.set up client tool
kubectl -n database \
create secret generic client-properties \
--from-literal=client.properties="$(printf "security.protocol=SASL_PLAINTEXT\nsasl.mechanism=SCRAM-SHA-256\nsasl.jaas.config=org.apache.kafka.common.security.scram.ScramLoginModule required username=\"user1\" password=\"$(kubectl get secret kafka-user-passwords --namespace database -o jsonpath='{.data.client-passwords}' | base64 -d | cut -d , -f 1)\";\n")"
kubectl -n database \
create secret generic client-properties \
--from-literal=client.properties="security.protocol=PLAINTEXT"5.prepare `kafka-client-tools.yaml`
Details
kubectl -n database apply -f - << EOF
apiVersion: apps/v1
kind: Deployment
metadata:
name: kafka-client-tools
labels:
app: kafka-client-tools
spec:
replicas: 1
selector:
matchLabels:
app: kafka-client-tools
template:
metadata:
labels:
app: kafka-client-tools
spec:
volumes:
- name: client-properties
secret:
secretName: client-properties
containers:
- name: kafka-client-tools
image: m.daocloud.io/docker.io/bitnami/kafka:3.6.2
volumeMounts:
- name: client-properties
mountPath: /bitnami/custom/client.properties
subPath: client.properties
readOnly: true
env:
- name: BOOTSTRAP_SERVER
value: kafka.database.svc.cluster.local:9092
- name: CLIENT_CONFIG_FILE
value: /bitnami/custom/client.properties
- name: ZOOKEEPER_CONNECT
value: kafka-zookeeper.database.svc.cluster.local:2181
command:
- tail
- -f
- /etc/hosts
imagePullPolicy: IfNotPresent
EOF
6.validate function
- list topics
Details
kubectl -n database exec -it deployment/kafka-client-tools -- bash -c \
'kafka-topics.sh --bootstrap-server $BOOTSTRAP_SERVER --command-config $CLIENT_CONFIG_FILE --list'
- create topic
Details
kubectl -n database exec -it deployment/kafka-client-tools -- bash -c \
'kafka-topics.sh --bootstrap-server $BOOTSTRAP_SERVER --command-config $CLIENT_CONFIG_FILE --create --if-not-exists --topic test-topic'
- describe topic
Details
kubectl -n database exec -it deployment/kafka-client-tools -- bash -c \
'kafka-topics.sh --bootstrap-server $BOOTSTRAP_SERVER --command-config $CLIENT_CONFIG_FILE --describe --topic test-topic'
- produce messages
Details
kubectl -n database exec -it deployment/kafka-client-tools -- bash -c \
'for message in $(seq 0 10); do echo $message | kafka-console-producer.sh --bootstrap-server $BOOTSTRAP_SERVER --producer.config $CLIENT_CONFIG_FILE --topic test-topic; done'
- consume messages
Details
kubectl -n database exec -it deployment/kafka-client-tools -- bash -c \
'kafka-console-consumer.sh --bootstrap-server $BOOTSTRAP_SERVER --consumer.config $CLIENT_CONFIG_FILE --topic test-topic --from-beginning'
Preliminary
1. Docker has been installed; if not, check 🔗link
Using Proxy
you can run an additional daocloud image to accelerate your image pulls; check Daocloud Proxy
1.init server
Details
mkdir -p kafka/data
chmod -R 777 kafka/data
podman run --rm \
--name kafka-server \
--hostname kafka-server \
-p 9092:9092 \
-p 9094:9094 \
-v $(pwd)/kafka/data:/bitnami/kafka/data \
-e KAFKA_CFG_NODE_ID=0 \
-e KAFKA_CFG_PROCESS_ROLES=controller,broker \
-e KAFKA_CFG_CONTROLLER_QUORUM_VOTERS=0@kafka-server:9093 \
-e KAFKA_CFG_LISTENERS=PLAINTEXT://:9092,CONTROLLER://:9093,EXTERNAL://:9094 \
-e KAFKA_CFG_ADVERTISED_LISTENERS=PLAINTEXT://kafka:9092,EXTERNAL://host.containers.internal:9094 \
-e KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP=CONTROLLER:PLAINTEXT,EXTERNAL:PLAINTEXT,PLAINTEXT:PLAINTEXT \
-e KAFKA_CFG_CONTROLLER_LISTENER_NAMES=CONTROLLER \
-d m.daocloud.io/docker.io/bitnami/kafka:3.6.2
2.list topic
Details
BOOTSTRAP_SERVER=host.containers.internal:9094
podman run --rm \
-it m.daocloud.io/docker.io/bitnami/kafka:3.6.2 kafka-topics.sh \
--bootstrap-server $BOOTSTRAP_SERVER --list
3.create topic
Details
BOOTSTRAP_SERVER=host.containers.internal:9094
# BOOTSTRAP_SERVER=10.200.60.64:9094
TOPIC=test-topic
podman run --rm \
-it m.daocloud.io/docker.io/bitnami/kafka:3.6.2 kafka-topics.sh \
--bootstrap-server $BOOTSTRAP_SERVER \
--create \
--if-not-exists \
--topic $TOPIC
4.consume record
Details
BOOTSTRAP_SERVER=host.containers.internal:9094
# BOOTSTRAP_SERVER=10.200.60.64:9094
TOPIC=test-topic
podman run --rm \
-it m.daocloud.io/docker.io/bitnami/kafka:3.6.2 kafka-console-consumer.sh \
--bootstrap-server $BOOTSTRAP_SERVER \
--topic $TOPIC \
--from-beginning
FAQ
Install MariaDB
Installation
Preliminary
1. Kubernetes has been installed; if not, check 🔗link
2. argoCD has been installed; if not, check 🔗link
3. cert-manager has been installed and a cluster issuer named `self-signed-ca-issuer` exists; if not, check 🔗link
1.prepare mariadb credentials secret
Details
kubectl get namespaces database > /dev/null 2>&1 || kubectl create namespace database
kubectl -n database create secret generic mariadb-credentials \
--from-literal=mariadb-root-password=$(tr -dc A-Za-z0-9 </dev/urandom | head -c 16) \
--from-literal=mariadb-replication-password=$(tr -dc A-Za-z0-9 </dev/urandom | head -c 16) \
--from-literal=mariadb-password=$(tr -dc A-Za-z0-9 </dev/urandom | head -c 16)
2.prepare `deploy-mariadb.yaml`
Details
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: mariadb
spec:
syncPolicy:
syncOptions:
- CreateNamespace=true
project: default
source:
repoURL: https://charts.bitnami.com/bitnami
chart: mariadb
targetRevision: 16.3.2
helm:
releaseName: mariadb
values: |
architecture: standalone
auth:
database: test-mariadb
username: aaron.yang
existingSecret: mariadb-credentials
primary:
extraFlags: "--character-set-server=utf8mb4 --collation-server=utf8mb4_bin"
persistence:
enabled: false
secondary:
replicaCount: 1
persistence:
enabled: false
image:
registry: m.daocloud.io/docker.io
pullPolicy: IfNotPresent
volumePermissions:
enabled: false
image:
registry: m.daocloud.io/docker.io
pullPolicy: IfNotPresent
metrics:
enabled: false
image:
registry: m.daocloud.io/docker.io
pullPolicy: IfNotPresent
destination:
server: https://kubernetes.default.svc
namespace: database
3.deploy mariadb
Details
kubectl -n argocd apply -f deploy-mariadb.yaml
4.sync by argocd
Details
argocd app sync argocd/mariadb
5.check mariadb
Details
kubectl -n database get secret mariadb-credentials -o jsonpath='{.data.mariadb-root-password}' | base64 -d
Preliminary
1. Kubernetes has installed, if not check 🔗link
2. ArgoCD has installed, if not check 🔗link
3. Argo Workflow has installed, if not check 🔗link
1.prepare `argocd-login-credentials`
Details
kubectl get namespaces database > /dev/null 2>&1 || kubectl create namespace database
ARGOCD_USERNAME=admin
ARGOCD_PASSWORD=$(kubectl -n argocd get secret argocd-initial-admin-secret -o jsonpath="{.data.password}" | base64 -d)
kubectl -n business-workflows create secret generic argocd-login-credentials \
--from-literal=username=${ARGOCD_USERNAME} \
--from-literal=password=${ARGOCD_PASSWORD}
2.apply rolebinding to k8s
Details
kubectl -n argocd apply -f - <<EOF
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: application-administrator
rules:
- apiGroups:
- argoproj.io
resources:
- applications
verbs:
- '*'
- apiGroups:
- apps
resources:
- deployments
verbs:
- '*'
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: application-administration
namespace: argocd
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: application-administrator
subjects:
- kind: ServiceAccount
name: argo-workflow
namespace: business-workflows
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: application-administration
namespace: application
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: application-administrator
subjects:
- kind: ServiceAccount
name: argo-workflow
namespace: business-workflows
EOF
3.prepare mariadb credentials secret
Details
kubectl -n application create secret generic mariadb-credentials \
--from-literal=mariadb-root-password=$(tr -dc A-Za-z0-9 </dev/urandom | head -c 16) \
--from-literal=mariadb-replication-password=$(tr -dc A-Za-z0-9 </dev/urandom | head -c 16) \
--from-literal=mariadb-password=$(tr -dc A-Za-z0-9 </dev/urandom | head -c 16)
4.prepare `deploy-mariadb-flow.yaml`
Details
apiVersion: argoproj.io/v1alpha1
kind: Workflow
metadata:
generateName: deploy-argocd-app-mariadb-
spec:
entrypoint: entry
artifactRepositoryRef:
configmap: artifact-repositories
key: default-artifact-repository
serviceAccountName: argo-workflow
templates:
- name: entry
inputs:
parameters:
- name: argocd-server
value: argo-cd-argocd-server.argocd:443
- name: insecure-option
value: --insecure
dag:
tasks:
- name: apply
template: apply
- name: prepare-argocd-binary
template: prepare-argocd-binary
dependencies:
- apply
- name: sync
dependencies:
- prepare-argocd-binary
template: sync
arguments:
artifacts:
- name: argocd-binary
from: "{{tasks.prepare-argocd-binary.outputs.artifacts.argocd-binary}}"
parameters:
- name: argocd-server
value: "{{inputs.parameters.argocd-server}}"
- name: insecure-option
value: "{{inputs.parameters.insecure-option}}"
- name: wait
dependencies:
- sync
template: wait
arguments:
artifacts:
- name: argocd-binary
from: "{{tasks.prepare-argocd-binary.outputs.artifacts.argocd-binary}}"
parameters:
- name: argocd-server
value: "{{inputs.parameters.argocd-server}}"
- name: insecure-option
value: "{{inputs.parameters.insecure-option}}"
- name: init-db-tool
template: init-db-tool
dependencies:
- wait
- name: apply
resource:
action: apply
manifest: |
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: app-mariadb
namespace: argocd
spec:
syncPolicy:
syncOptions:
- CreateNamespace=true
project: default
source:
repoURL: https://charts.bitnami.com/bitnami
chart: mariadb
targetRevision: 16.5.0
helm:
releaseName: app-mariadb
values: |
architecture: standalone
auth:
database: geekcity
username: aaron.yang
existingSecret: mariadb-credentials
primary:
persistence:
enabled: false
secondary:
replicaCount: 1
persistence:
enabled: false
image:
registry: m.daocloud.io/docker.io
pullPolicy: IfNotPresent
volumePermissions:
enabled: false
image:
registry: m.daocloud.io/docker.io
pullPolicy: IfNotPresent
metrics:
enabled: false
image:
registry: m.daocloud.io/docker.io
pullPolicy: IfNotPresent
destination:
server: https://kubernetes.default.svc
namespace: application
- name: prepare-argocd-binary
inputs:
artifacts:
- name: argocd-binary
path: /tmp/argocd
mode: 755
http:
url: https://files.m.daocloud.io/github.com/argoproj/argo-cd/releases/download/v2.9.3/argocd-linux-amd64
outputs:
artifacts:
- name: argocd-binary
path: "{{inputs.artifacts.argocd-binary.path}}"
container:
image: m.daocloud.io/docker.io/library/fedora:39
command:
- sh
- -c
args:
- |
ls -l {{inputs.artifacts.argocd-binary.path}}
- name: sync
inputs:
artifacts:
- name: argocd-binary
path: /usr/local/bin/argocd
parameters:
- name: argocd-server
- name: insecure-option
value: ""
container:
image: m.daocloud.io/docker.io/library/fedora:39
env:
- name: ARGOCD_USERNAME
valueFrom:
secretKeyRef:
name: argocd-login-credentials
key: username
- name: ARGOCD_PASSWORD
valueFrom:
secretKeyRef:
name: argocd-login-credentials
key: password
- name: WITH_PRUNE_OPTION
value: --prune
command:
- sh
- -c
args:
- |
set -e
export ARGOCD_SERVER={{inputs.parameters.argocd-server}}
export INSECURE_OPTION={{inputs.parameters.insecure-option}}
export ARGOCD_USERNAME=${ARGOCD_USERNAME:-admin}
argocd login ${INSECURE_OPTION} --username ${ARGOCD_USERNAME} --password ${ARGOCD_PASSWORD} ${ARGOCD_SERVER}
argocd app sync argocd/app-mariadb ${WITH_PRUNE_OPTION} --timeout 300
- name: wait
inputs:
artifacts:
- name: argocd-binary
path: /usr/local/bin/argocd
parameters:
- name: argocd-server
- name: insecure-option
value: ""
container:
image: m.daocloud.io/docker.io/library/fedora:39
env:
- name: ARGOCD_USERNAME
valueFrom:
secretKeyRef:
name: argocd-login-credentials
key: username
- name: ARGOCD_PASSWORD
valueFrom:
secretKeyRef:
name: argocd-login-credentials
key: password
command:
- sh
- -c
args:
- |
set -e
export ARGOCD_SERVER={{inputs.parameters.argocd-server}}
export INSECURE_OPTION={{inputs.parameters.insecure-option}}
export ARGOCD_USERNAME=${ARGOCD_USERNAME:-admin}
argocd login ${INSECURE_OPTION} --username ${ARGOCD_USERNAME} --password ${ARGOCD_PASSWORD} ${ARGOCD_SERVER}
argocd app wait argocd/app-mariadb
- name: init-db-tool
resource:
action: apply
manifest: |
apiVersion: apps/v1
kind: Deployment
metadata:
name: app-mariadb-tool
namespace: application
labels:
app.kubernetes.io/name: mariadb-tool
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: mariadb-tool
template:
metadata:
labels:
app.kubernetes.io/name: mariadb-tool
spec:
containers:
- name: mariadb-tool
image: m.daocloud.io/docker.io/bitnami/mariadb:10.5.12-debian-10-r0
imagePullPolicy: IfNotPresent
env:
- name: MARIADB_ROOT_PASSWORD
valueFrom:
secretKeyRef:
key: mariadb-root-password
name: mariadb-credentials
- name: TZ
value: Asia/Shanghai
5.submit to argo workflow client
Details
argo -n business-workflows submit deploy-mariadb-flow.yaml
6.decode password
Details
kubectl -n application get secret mariadb-credentials -o jsonpath='{.data.mariadb-root-password}' | base64 -d
Preliminary
1. Docker has installed, if not check 🔗link
Using Proxy
you can use an additional DaoCloud mirror to accelerate image pulling, check Daocloud Proxy
1.init server
Details
mkdir -p mariadb/data
podman run \
-p 3306:3306 \
-e MARIADB_ROOT_PASSWORD=mysql \
-d m.daocloud.io/docker.io/library/mariadb:11.2.2-jammy \
--log-bin \
--binlog-format=ROW
2.use web console
And then you can visit 🔗http://localhost:8080
username: `root`
password: `mysql`
Details
podman run --rm -p 8080:80 \
-e PMA_ARBITRARY=1 \
-d m.daocloud.io/docker.io/library/phpmyadmin:5.1.1-apache
3.use internal client
Details
podman run --rm \
-e MYSQL_PWD=mysql \
-it m.daocloud.io/docker.io/library/mariadb:11.2.2-jammy \
mariadb \
--host host.containers.internal \
--port 3306 \
--user root \
--database mysql \
--execute 'select version()'
Useful SQL
- list all bin logs
SHOW BINARY LOGS;
- delete previous bin logs
PURGE BINARY LOGS TO 'mysqld-bin.0000003'; # delete mysqld-bin.0000001 and mysqld-bin.0000002
PURGE BINARY LOGS BEFORE 'yyyy-MM-dd HH:mm:ss';
PURGE BINARY LOGS BEFORE DATE_SUB(NOW(), INTERVAL 3 DAY); # delete bin log files older than three days.
Details
If you are using master-slave (replication) mode, you can replace BINARY with MASTER in the statements above.
FAQ
Install Milvus
Preliminary
- Kubernetes has installed, if not check link
- argoCD has installed, if not check link
- cert-manager has installed on argocd and there is a ClusterIssuer named self-signed-ca-issuer, if not check link
- minio has installed, if not check link
Steps
1. copy minio credentials secret
kubectl get namespaces database > /dev/null 2>&1 || kubectl create namespace database
kubectl -n storage get secret minio-secret -o json \
| jq 'del(.metadata["namespace","creationTimestamp","resourceVersion","selfLink","uid"])' \
| kubectl -n database apply -f -
2. prepare deploy-milvus.yaml
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: milvus
spec:
syncPolicy:
syncOptions:
- CreateNamespace=true
project: default
source:
repoURL: registry-1.docker.io/bitnamicharts
chart: milvus
targetRevision: 11.2.4
helm:
releaseName: milvus
values: |
global:
security:
allowInsecureImages: true
milvus:
image:
registry: m.lab.zverse.space/docker.io
repository: bitnami/milvus
tag: 2.5.7-debian-12-r0
pullPolicy: IfNotPresent
auth:
enabled: false
initJob:
forceRun: false
image:
registry: m.lab.zverse.space/docker.io
repository: bitnami/pymilvus
tag: 2.5.6-debian-12-r0
pullPolicy: IfNotPresent
resources:
requests:
cpu: 2
memory: 512Mi
limits:
cpu: 2
memory: 2Gi
dataCoord:
replicaCount: 1
resources:
requests:
cpu: 500m
memory: 512Mi
limits:
cpu: 2
memory: 2Gi
metrics:
enabled: true
rootCoord:
replicaCount: 1
resources:
requests:
cpu: 500m
memory: 1Gi
limits:
cpu: 2
memory: 4Gi
queryCoord:
replicaCount: 1
resources:
requests:
cpu: 500m
memory: 1Gi
limits:
cpu: 2
memory: 4Gi
indexCoord:
replicaCount: 1
resources:
requests:
cpu: 500m
memory: 1Gi
limits:
cpu: 2
memory: 4Gi
dataNode:
replicaCount: 1
resources:
requests:
cpu: 500m
memory: 1Gi
limits:
cpu: 2
memory: 4Gi
queryNode:
replicaCount: 1
resources:
requests:
cpu: 500m
memory: 1Gi
limits:
cpu: 2
memory: 2Gi
indexNode:
resources:
requests:
cpu: 500m
memory: 1Gi
limits:
cpu: 2
memory: 2Gi
proxy:
replicaCount: 1
service:
type: ClusterIP
resources:
requests:
cpu: 500m
memory: 1Gi
limits:
cpu: 2
memory: 2Gi
attu:
image:
registry: m.lab.zverse.space/docker.io
repository: bitnami/attu
tag: 2.5.5-debian-12-r1
resources:
requests:
cpu: 500m
memory: 1Gi
limits:
cpu: 2
memory: 4Gi
service:
type: ClusterIP
ingress:
enabled: true
ingressClassName: "nginx"
annotations:
cert-manager.io/cluster-issuer: alidns-webhook-zverse-letsencrypt
hostname: milvus.dev.tech
path: /
pathType: ImplementationSpecific
tls: true
waitContainer:
image:
registry: m.lab.zverse.space/docker.io
repository: bitnami/os-shell
tag: 12-debian-12-r40
pullPolicy: IfNotPresent
resources:
requests:
cpu: 500m
memory: 1Gi
limits:
cpu: 2
memory: 4Gi
externalS3:
host: "minio.storage"
port: 9000
existingSecret: "minio-secret"
existingSecretAccessKeyIDKey: "root-user"
existingSecretKeySecretKey: "root-password"
bucket: "milvus"
rootPath: "file"
etcd:
enabled: true
image:
registry: m.lab.zverse.space/docker.io
replicaCount: 1
auth:
rbac:
create: false
client:
secureTransport: false
resources:
requests:
cpu: 500m
memory: 1Gi
limits:
cpu: 2
memory: 2Gi
persistence:
enabled: true
storageClass: ""
size: 2Gi
preUpgradeJob:
enabled: false
minio:
enabled: false
kafka:
enabled: true
image:
registry: m.lab.zverse.space/docker.io
controller:
replicaCount: 1
livenessProbe:
failureThreshold: 8
resources:
requests:
cpu: 500m
memory: 1Gi
limits:
cpu: 2
memory: 2Gi
persistence:
enabled: true
storageClass: ""
size: 2Gi
service:
ports:
client: 9092
extraConfig: |-
offsets.topic.replication.factor=3
listeners:
client:
protocol: PLAINTEXT
interbroker:
protocol: PLAINTEXT
external:
protocol: PLAINTEXT
sasl:
enabledMechanisms: "PLAIN"
client:
users:
- user
broker:
replicaCount: 0
destination:
server: https://kubernetes.default.svc
namespace: database
3. apply to k8s
kubectl -n argocd apply -f deploy-milvus.yaml
4. sync by argocd
argocd app sync argocd/milvus
5. check Attu WebUI
milvus address: milvus-proxy:19530
milvus database: default
https://milvus.dev.tech:32443/#/
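Before opening Attu at the address above, you can confirm the proxy Service is reachable; a minimal sketch assuming the Service is named milvus-proxy (as noted above) in the database namespace:
kubectl -n database get svc milvus-proxy
kubectl -n database port-forward svc/milvus-proxy 19530:19530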
5. [Optional] import data
import data by using a SQL file
MARIADB_ROOT_PASSWORD=$(kubectl -n database get secret mariadb-credentials -o jsonpath='{.data.mariadb-root-password}' | base64 -d)
POD_NAME=$(kubectl get pod -n database -l "app.kubernetes.io/name=mariadb-tool" -o jsonpath="{.items[0].metadata.name}") \
&& export SQL_FILENAME="Dump20240301.sql" \
&& kubectl -n database cp ${SQL_FILENAME} ${POD_NAME}:/tmp/${SQL_FILENAME} \
&& kubectl -n database exec -it deployment/app-mariadb-tool -- bash -c \
'echo "create database ccds;" | mysql -h mariadb.database -uroot -p$MARIADB_ROOT_PASSWORD' \
&& kubectl -n database exec -it ${POD_NAME} -- bash -c \
"mysql -h mariadb.database -uroot -p\${MARIADB_ROOT_PASSWORD} \
ccds < /tmp/Dump20240301.sql"
6. [Optional] decode password
kubectl -n database get secret mariadb-credentials -o jsonpath='{.data.mariadb-root-password}' | base64 -d
7. [Optional] execute sql in pod
kubectl -n database exec -it xxxx -- bash
mariadb -h 127.0.0.1 -u root -p$MARIADB_ROOT_PASSWORD
And then you can check the connection with
show status like 'Threads%';
Install Neo4j
Installation
Preliminary
1. Kubernetes has installed, if not check 🔗link
2. Helm has installed, if not check 🔗link
1.get helm repo
Details
helm repo add ay-helm-mirror https://aaronyang0628.github.io/helm-chart-mirror/charts
helm repo update
2.install chart
Details
helm install ay-helm-mirror/kube-prometheus-stack --generate-name
Using Proxy
for more information, you can check 🔗https://artifacthub.io/packages/helm/prometheus-community/prometheus
Preliminary
1. Kubernetes has installed, if not check 🔗link
2. Helm has installed, if not check 🔗link
3. ArgoCD has installed, if not check 🔗link
1.prepare `deploy-xxxxx.yaml`
Details
2.apply to k8s
Details
kubectl -n argocd apply -f xxxx.yaml
3.sync by argocd
Details
argocd app sync argocd/xxxx
4.prepare yaml-content.yaml
Details
5.apply to k8s
Details
kubectl apply -f xxxx.yaml
6.apply xxxx.yaml directly
Details
kubectl apply -f - <<EOF
EOF
Preliminary
1. Docker|Podman|Buildah has installed, if not check 🔗link
Using Proxy
you can use an additional DaoCloud mirror to accelerate image pulling, check Daocloud Proxy
1.init server
Details
mkdir -p neo4j/data
podman run --rm \
--name neo4j \
-p 7474:7474 \
-p 7687:7687 \
-e NEO4J_AUTH=neo4j/neo4j-password \
-v $(pwd)/neo4j/data:/data \
-d docker.io/library/neo4j:5.18.0-community-bullseye
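To verify the server accepts connections, you can run cypher-shell from the same image; a minimal sketch assuming the NEO4J_AUTH credentials set above:
echo 'RETURN 1;' | podman run --rm \
-i docker.io/library/neo4j:5.18.0-community-bullseye \
cypher-shell -a neo4j://host.containers.internal:7687 -u neo4j -p neo4j-password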
Preliminary
1. Kubernetes has installed, if not check 🔗link
2. Helm has installed, if not check 🔗link
3. ArgoCD has installed, if not check 🔗link
4. Argo Workflow has installed, if not check 🔗link
1.prepare `argocd-login-credentials`
Details
kubectl get namespaces database > /dev/null 2>&1 || kubectl create namespace database
2.apply rolebinding to k8s
Details
kubectl apply -f - <<EOF
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: application-administrator
rules:
- apiGroups:
- argoproj.io
resources:
- applications
verbs:
- '*'
- apiGroups:
- apps
resources:
- deployments
verbs:
- '*'
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: application-administration
namespace: argocd
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: application-administrator
subjects:
- kind: ServiceAccount
name: argo-workflow
namespace: business-workflows
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: application-administration
namespace: application
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: application-administrator
subjects:
- kind: ServiceAccount
name: argo-workflow
namespace: business-workflows
EOF
4.prepare `deploy-xxxx-flow.yaml`
Details
6.submit to argo workflow client
Details
argo -n business-workflows submit deploy-xxxx-flow.yaml
7.decode password
Details
kubectl -n application get secret xxxx-credentials -o jsonpath='{.data.xxx-password}' | base64 -d
FAQ
Install Postgresql
Installation
Preliminary
1. Kubernetes has installed, if not check 🔗link
2. Helm has installed, if not check 🔗link
1.get helm repo
Details
helm repo add ay-helm-mirror https://aaronyang0628.github.io/helm-chart-mirror/charts
helm repo update
2.install chart
Details
helm install ay-helm-mirror/kube-prometheus-stack --generate-name
Using Proxy
for more information, you can check 🔗https://artifacthub.io/packages/helm/prometheus-community/prometheus
Preliminary
1. Kubernetes has installed, if not check 🔗link
2. Helm has installed, if not check 🔗link
3. ArgoCD has installed, if not check 🔗link
1.prepare `deploy-xxxxx.yaml`
Details
2.apply to k8s
Details
kubectl -n argocd apply -f xxxx.yaml
3.sync by argocd
Details
argocd app sync argocd/xxxx
4.prepare yaml-content.yaml
Details
5.apply to k8s
Details
kubectl apply -f xxxx.yaml
6.apply xxxx.yaml directly
Details
kubectl apply -f - <<EOF
EOF
Preliminary
1. Docker|Podman|Buildah has installed, if not check 🔗link
Using Proxy
you can use an additional DaoCloud mirror to accelerate image pulling, check Daocloud Proxy
1.init server
Details
mkdir -p $(pwd)/postgresql/data
podman run --rm \
--name postgresql \
-p 5432:5432 \
-e POSTGRES_PASSWORD=postgresql \
-e PGDATA=/var/lib/postgresql/data/pgdata \
-v $(pwd)/postgresql/data:/var/lib/postgresql/data \
-d docker.io/library/postgres:15.2-alpine3.17
2.use web console
Details
podman run --rm \
-p 8080:80 \
-e 'PGADMIN_DEFAULT_EMAIL=ben.wangz@foxmail.com' \
-e 'PGADMIN_DEFAULT_PASSWORD=123456' \
-d docker.io/dpage/pgadmin4:6.15
3.use internal client
Details
podman run --rm \
--env PGPASSWORD=postgresql \
--entrypoint psql \
-it docker.io/library/postgres:15.2-alpine3.17 \
--host host.containers.internal \
--port 5432 \
--username postgres \
--dbname postgres \
--command 'select version()'
Preliminary
1. Kubernetes has installed, if not check 🔗link
2. Helm has installed, if not check 🔗link
3. ArgoCD has installed, if not check 🔗link
4. Argo Workflow has installed, if not check 🔗link
5. Minio artifact repository has been configured, if not check 🔗link
- endpoint: minio.storage:9000
1.prepare `argocd-login-credentials`
Details
kubectl get namespaces database > /dev/null 2>&1 || kubectl create namespace database
ARGOCD_USERNAME=admin
ARGOCD_PASSWORD=$(kubectl -n argocd get secret argocd-initial-admin-secret -o jsonpath="{.data.password}" | base64 -d)
kubectl -n business-workflows create secret generic argocd-login-credentials \
--from-literal=username=${ARGOCD_USERNAME} \
--from-literal=password=${ARGOCD_PASSWORD}
2.apply rolebinding to k8s
Details
kubectl apply -f - <<EOF
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: application-administrator
rules:
- apiGroups:
- argoproj.io
resources:
- applications
verbs:
- '*'
- apiGroups:
- apps
resources:
- deployments
verbs:
- '*'
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: application-administration
namespace: argocd
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: application-administrator
subjects:
- kind: ServiceAccount
name: argo-workflow
namespace: business-workflows
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: application-administration
namespace: application
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: application-administrator
subjects:
- kind: ServiceAccount
name: argo-workflow
namespace: business-workflows
EOF
3.prepare postgresql admin credentials secret
Details
kubectl -n application create secret generic postgresql-credentials \
--from-literal=postgres-password=$(tr -dc A-Za-z0-9 </dev/urandom | head -c 16) \
--from-literal=password=$(tr -dc A-Za-z0-9 </dev/urandom | head -c 16) \
--from-literal=replication-password=$(tr -dc A-Za-z0-9 </dev/urandom | head -c 16)
4.prepare `deploy-postgresql-flow.yaml`
Details
apiVersion: argoproj.io/v1alpha1
kind: Workflow
metadata:
generateName: deploy-argocd-app-pg-
spec:
entrypoint: entry
artifactRepositoryRef:
configmap: artifact-repositories
key: default-artifact-repository
serviceAccountName: argo-workflow
templates:
- name: entry
inputs:
parameters:
- name: argocd-server
value: argo-cd-argocd-server.argocd:443
- name: insecure-option
value: --insecure
dag:
tasks:
- name: apply
template: apply
- name: prepare-argocd-binary
template: prepare-argocd-binary
dependencies:
- apply
- name: sync
dependencies:
- prepare-argocd-binary
template: sync
arguments:
artifacts:
- name: argocd-binary
from: "{{tasks.prepare-argocd-binary.outputs.artifacts.argocd-binary}}"
parameters:
- name: argocd-server
value: "{{inputs.parameters.argocd-server}}"
- name: insecure-option
value: "{{inputs.parameters.insecure-option}}"
- name: wait
dependencies:
- sync
template: wait
arguments:
artifacts:
- name: argocd-binary
from: "{{tasks.prepare-argocd-binary.outputs.artifacts.argocd-binary}}"
parameters:
- name: argocd-server
value: "{{inputs.parameters.argocd-server}}"
- name: insecure-option
value: "{{inputs.parameters.insecure-option}}"
- name: init-db-tool
template: init-db-tool
dependencies:
- wait
- name: apply
resource:
action: apply
manifest: |
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: app-postgresql
namespace: argocd
spec:
syncPolicy:
syncOptions:
- CreateNamespace=true
project: default
source:
repoURL: https://charts.bitnami.com/bitnami
chart: postgresql
targetRevision: 14.2.2
helm:
releaseName: app-postgresql
values: |
architecture: standalone
auth:
database: geekcity
username: aaron.yang
existingSecret: postgresql-credentials
primary:
persistence:
enabled: false
readReplicas:
replicaCount: 1
persistence:
enabled: false
backup:
enabled: false
image:
registry: m.daocloud.io/docker.io
pullPolicy: IfNotPresent
volumePermissions:
enabled: false
image:
registry: m.daocloud.io/docker.io
pullPolicy: IfNotPresent
metrics:
enabled: false
image:
registry: m.daocloud.io/docker.io
pullPolicy: IfNotPresent
destination:
server: https://kubernetes.default.svc
namespace: application
- name: prepare-argocd-binary
inputs:
artifacts:
- name: argocd-binary
path: /tmp/argocd
mode: 755
http:
url: https://files.m.daocloud.io/github.com/argoproj/argo-cd/releases/download/v2.9.3/argocd-linux-amd64
outputs:
artifacts:
- name: argocd-binary
path: "{{inputs.artifacts.argocd-binary.path}}"
container:
image: m.daocloud.io/docker.io/library/fedora:39
command:
- sh
- -c
args:
- |
ls -l {{inputs.artifacts.argocd-binary.path}}
- name: sync
inputs:
artifacts:
- name: argocd-binary
path: /usr/local/bin/argocd
parameters:
- name: argocd-server
- name: insecure-option
value: ""
container:
image: m.daocloud.io/docker.io/library/fedora:39
env:
- name: ARGOCD_USERNAME
valueFrom:
secretKeyRef:
name: argocd-login-credentials
key: username
- name: ARGOCD_PASSWORD
valueFrom:
secretKeyRef:
name: argocd-login-credentials
key: password
- name: WITH_PRUNE_OPTION
value: --prune
command:
- sh
- -c
args:
- |
set -e
export ARGOCD_SERVER={{inputs.parameters.argocd-server}}
export INSECURE_OPTION={{inputs.parameters.insecure-option}}
export ARGOCD_USERNAME=${ARGOCD_USERNAME:-admin}
argocd login ${INSECURE_OPTION} --username ${ARGOCD_USERNAME} --password ${ARGOCD_PASSWORD} ${ARGOCD_SERVER}
argocd app sync argocd/app-postgresql ${WITH_PRUNE_OPTION} --timeout 300
- name: wait
inputs:
artifacts:
- name: argocd-binary
path: /usr/local/bin/argocd
parameters:
- name: argocd-server
- name: insecure-option
value: ""
container:
image: m.daocloud.io/docker.io/library/fedora:39
env:
- name: ARGOCD_USERNAME
valueFrom:
secretKeyRef:
name: argocd-login-credentials
key: username
- name: ARGOCD_PASSWORD
valueFrom:
secretKeyRef:
name: argocd-login-credentials
key: password
command:
- sh
- -c
args:
- |
set -e
export ARGOCD_SERVER={{inputs.parameters.argocd-server}}
export INSECURE_OPTION={{inputs.parameters.insecure-option}}
export ARGOCD_USERNAME=${ARGOCD_USERNAME:-admin}
argocd login ${INSECURE_OPTION} --username ${ARGOCD_USERNAME} --password ${ARGOCD_PASSWORD} ${ARGOCD_SERVER}
argocd app wait argocd/app-postgresql
- name: init-db-tool
resource:
action: apply
manifest: |
apiVersion: apps/v1
kind: Deployment
metadata:
name: app-postgresql-tool
namespace: application
labels:
app.kubernetes.io/name: postgresql-tool
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: postgresql-tool
template:
metadata:
labels:
app.kubernetes.io/name: postgresql-tool
spec:
containers:
- name: postgresql-tool
image: m.daocloud.io/docker.io/bitnami/postgresql:14.4.0-debian-11-r9
imagePullPolicy: IfNotPresent
env:
- name: POSTGRES_PASSWORD
valueFrom:
secretKeyRef:
key: postgres-password
name: postgresql-credentials
- name: TZ
value: Asia/Shanghai
command:
- tail
args:
- -f
- /etc/hosts
6.submit to argo workflow client
Details
argo -n business-workflows submit deploy-postgresql-flow.yaml
7.decode password
Details
kubectl -n application get secret postgresql-credentials -o jsonpath='{.data.postgres-password}' | base64 -d
8.import data
Details
POSTGRES_PASSWORD=$(kubectl -n application get secret postgresql-credentials -o jsonpath='{.data.postgres-password}' | base64 -d) \
POD_NAME=$(kubectl get pod -n application -l "app.kubernetes.io/name=postgresql-tool" -o jsonpath="{.items[0].metadata.name}") \
&& export SQL_FILENAME="init_dfs_table_data.sql" \
&& kubectl -n application cp ${SQL_FILENAME} ${POD_NAME}:/tmp/${SQL_FILENAME} \
&& kubectl -n application exec -it deployment/app-postgresql-tool -- bash -c \
'echo "CREATE DATABASE csst;" | PGPASSWORD="$POSTGRES_PASSWORD" \
psql --host app-postgresql.application -U postgres -d postgres -p 5432' \
&& kubectl -n application exec -it deployment/app-postgresql-tool -- bash -c \
'PGPASSWORD="$POSTGRES_PASSWORD" psql --host app-postgresql.application \
-U postgres -d csst -p 5432 < /tmp/init_dfs_table_data.sql'
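To confirm the import, list the tables in the new database; a small sketch reusing the same tool pod:
kubectl -n application exec -it deployment/app-postgresql-tool -- bash -c \
'PGPASSWORD="$POSTGRES_PASSWORD" psql --host app-postgresql.application -U postgres -d csst -p 5432 -c "\dt"'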
FAQ
Install Redis
Installation
Preliminary
1. Kubernetes has installed, if not check 🔗link
2. Helm has installed, if not check 🔗link
1.get helm repo
Details
helm repo add ay-helm-mirror https://aaronyang0628.github.io/helm-chart-mirror/charts
helm repo update
2.install chart
Details
helm install ay-helm-mirror/kube-prometheus-stack --generate-name
Using Proxy
for more information, you can check 🔗https://artifacthub.io/packages/helm/prometheus-community/prometheus
Preliminary
1. Kubernetes has installed, if not check 🔗link
2. Helm has installed, if not check 🔗link
3. ArgoCD has installed, if not check 🔗link
1.prepare `deploy-xxxxx.yaml`
Details
2.apply to k8s
Details
kubectl -n argocd apply -f xxxx.yaml
3.sync by argocd
Details
argocd app sync argocd/xxxx
4.prepare yaml-content.yaml
Details
5.apply to k8s
Details
kubectl apply -f xxxx.yaml
6.apply xxxx.yaml directly
Details
kubectl apply -f - <<EOF
EOF
Preliminary
1. Docker|Podman|Buildah has installed, if not check 🔗link
Using Proxy
you can use an additional DaoCloud mirror to accelerate image pulling, check Daocloud Proxy
1.init server
Details
mkdir -p $(pwd)/redis/data
podman run --rm \
--name redis \
-p 6379:6379 \
-d docker.io/library/redis:7.2.4-alpine
2.use internal client
Details
podman run --rm \
-it docker.io/library/redis:7.2.4-alpine \
redis-cli \
-h host.containers.internal \
set mykey somevalue
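To read the value back, a minimal follow-up with the same client image:
podman run --rm \
-it docker.io/library/redis:7.2.4-alpine \
redis-cli \
-h host.containers.internal \
get mykey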
Preliminary
1. Kubernetes has installed, if not check 🔗link
2. Helm has installed, if not check 🔗link
3. ArgoCD has installed, if not check 🔗link
4. Argo Workflow has installed, if not check 🔗link
5. Minio artifact repository has been configured, if not check 🔗link
- endpoint: minio.storage:9000
1.prepare `argocd-login-credentials`
Details
ARGOCD_USERNAME=admin
ARGOCD_PASSWORD=$(kubectl -n argocd get secret argocd-initial-admin-secret -o jsonpath="{.data.password}" | base64 -d)
kubectl -n business-workflows create secret generic argocd-login-credentials \
--from-literal=username=${ARGOCD_USERNAME} \
--from-literal=password=${ARGOCD_PASSWORD}
2.apply rolebinding to k8s
Details
kubectl apply -f - <<EOF
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: application-administrator
rules:
- apiGroups:
- argoproj.io
resources:
- applications
verbs:
- '*'
- apiGroups:
- apps
resources:
- deployments
verbs:
- '*'
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: application-administration
namespace: argocd
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: application-administrator
subjects:
- kind: ServiceAccount
name: argo-workflow
namespace: business-workflows
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: application-administration
namespace: application
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: application-administrator
subjects:
- kind: ServiceAccount
name: argo-workflow
namespace: business-workflows
EOF
3.prepare redis credentials secret
Details
kubectl -n application create secret generic redis-credentials \
--from-literal=redis-password=$(tr -dc A-Za-z0-9 </dev/urandom | head -c 16)
4.prepare `deploy-redis-flow.yaml`
Details
apiVersion: argoproj.io/v1alpha1
kind: Workflow
metadata:
generateName: deploy-argocd-app-redis-
spec:
entrypoint: entry
artifactRepositoryRef:
configmap: artifact-repositories
key: default-artifact-repository
serviceAccountName: argo-workflow
templates:
- name: entry
inputs:
parameters:
- name: argocd-server
value: argocd-server.argocd:443
- name: insecure-option
value: --insecure
dag:
tasks:
- name: apply
template: apply
- name: prepare-argocd-binary
template: prepare-argocd-binary
dependencies:
- apply
- name: sync
dependencies:
- prepare-argocd-binary
template: sync
arguments:
artifacts:
- name: argocd-binary
from: "{{tasks.prepare-argocd-binary.outputs.artifacts.argocd-binary}}"
parameters:
- name: argocd-server
value: "{{inputs.parameters.argocd-server}}"
- name: insecure-option
value: "{{inputs.parameters.insecure-option}}"
- name: wait
dependencies:
- sync
template: wait
arguments:
artifacts:
- name: argocd-binary
from: "{{tasks.prepare-argocd-binary.outputs.artifacts.argocd-binary}}"
parameters:
- name: argocd-server
value: "{{inputs.parameters.argocd-server}}"
- name: insecure-option
value: "{{inputs.parameters.insecure-option}}"
- name: apply
resource:
action: apply
manifest: |
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: app-redis
namespace: argocd
spec:
syncPolicy:
syncOptions:
- CreateNamespace=true
project: default
source:
repoURL: https://charts.bitnami.com/bitnami
chart: redis
targetRevision: 18.16.0
helm:
releaseName: app-redis
values: |
architecture: replication
auth:
enabled: true
sentinel: true
existingSecret: redis-credentials
master:
count: 1
disableCommands:
- FLUSHDB
- FLUSHALL
persistence:
enabled: false
replica:
replicaCount: 3
disableCommands:
- FLUSHDB
- FLUSHALL
persistence:
enabled: false
image:
registry: m.daocloud.io/docker.io
pullPolicy: IfNotPresent
sentinel:
enabled: false
persistence:
enabled: false
image:
registry: m.daocloud.io/docker.io
pullPolicy: IfNotPresent
metrics:
enabled: false
image:
registry: m.daocloud.io/docker.io
pullPolicy: IfNotPresent
volumePermissions:
enabled: false
image:
registry: m.daocloud.io/docker.io
pullPolicy: IfNotPresent
sysctl:
enabled: false
image:
registry: m.daocloud.io/docker.io
pullPolicy: IfNotPresent
destination:
server: https://kubernetes.default.svc
namespace: application
- name: prepare-argocd-binary
inputs:
artifacts:
- name: argocd-binary
path: /tmp/argocd
mode: 755
http:
url: https://files.m.daocloud.io/github.com/argoproj/argo-cd/releases/download/v2.9.3/argocd-linux-amd64
outputs:
artifacts:
- name: argocd-binary
path: "{{inputs.artifacts.argocd-binary.path}}"
container:
image: m.daocloud.io/docker.io/library/fedora:39
command:
- sh
- -c
args:
- |
ls -l {{inputs.artifacts.argocd-binary.path}}
- name: sync
inputs:
artifacts:
- name: argocd-binary
path: /usr/local/bin/argocd
parameters:
- name: argocd-server
- name: insecure-option
value: ""
container:
image: m.daocloud.io/docker.io/library/fedora:39
env:
- name: ARGOCD_USERNAME
valueFrom:
secretKeyRef:
name: argocd-login-credentials
key: username
- name: ARGOCD_PASSWORD
valueFrom:
secretKeyRef:
name: argocd-login-credentials
key: password
- name: WITH_PRUNE_OPTION
value: --prune
command:
- sh
- -c
args:
- |
set -e
export ARGOCD_SERVER={{inputs.parameters.argocd-server}}
export INSECURE_OPTION={{inputs.parameters.insecure-option}}
export ARGOCD_USERNAME=${ARGOCD_USERNAME:-admin}
argocd login ${INSECURE_OPTION} --username ${ARGOCD_USERNAME} --password ${ARGOCD_PASSWORD} ${ARGOCD_SERVER}
argocd app sync argocd/app-redis ${WITH_PRUNE_OPTION} --timeout 300
- name: wait
inputs:
artifacts:
- name: argocd-binary
path: /usr/local/bin/argocd
parameters:
- name: argocd-server
- name: insecure-option
value: ""
container:
image: m.daocloud.io/docker.io/library/fedora:39
env:
- name: ARGOCD_USERNAME
valueFrom:
secretKeyRef:
name: argocd-login-credentials
key: username
- name: ARGOCD_PASSWORD
valueFrom:
secretKeyRef:
name: argocd-login-credentials
key: password
command:
- sh
- -c
args:
- |
set -e
export ARGOCD_SERVER={{inputs.parameters.argocd-server}}
export INSECURE_OPTION={{inputs.parameters.insecure-option}}
export ARGOCD_USERNAME=${ARGOCD_USERNAME:-admin}
argocd login ${INSECURE_OPTION} --username ${ARGOCD_USERNAME} --password ${ARGOCD_PASSWORD} ${ARGOCD_SERVER}
argocd app wait argocd/app-redis
6.submit to argo workflow client
Details
argo -n business-workflows submit deploy-redis-flow.yaml
7.decode password
Details
kubectl -n application get secret redis-credentials -o jsonpath='{.data.redis-password}' | base64 -d
FAQ
Git
Subsections of Git
Install Act Runner
Installation
Preliminary
1. Kubernetes has installed, if not check 🔗link
2. Helm binary has installed, if not check 🔗link
1.get helm repo
Details
helm repo add ay-helm-mirror https://aaronyang0628.github.io/helm-chart-mirror/charts
helm repo update
2.prepare `act-runner-secret`
Details
kubectl -n application create secret generic act-runner-secret \
--from-literal=act-runner-token=4w3Sx0Hwe6VFevl473ZZ4nFVDvFvhKcEUBvpJ09L
3.prepare values
Details
echo "
replicas: 1
runner:
instanceURL: http://192.168.100.125:30300
token:
fromSecret:
name: "act-runner-secret"
key: "act-runner-token"" > act-runner-values.yaml4.install chart
Details
helm upgrade --create-namespace -n application --install -f ./act-runner-values.yaml act-runner ay-helm-mirror/act-runner
Preliminary
1. Kubernetes has installed, if not check 🔗link
2. ArgoCD has installed, if not check 🔗link
3. Helm binary has installed, if not check 🔗link
1.prepare `act-runner-secret`
Details
kubectl -n application create secret generic act-runner-secret \
--from-literal=act-runner-token=4w3Sx0Hwe6VFevl473ZZ4nFVDvFvhKcEUBvpJ09L
2.prepare
act-runner.yaml
kubectl -n argocd apply -f - <<EOF
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: act-runner
spec:
syncPolicy:
syncOptions:
- CreateNamespace=true
project: default
source:
repoURL: https://aaronyang0628.github.io/helm-chart-mirror/charts
chart: act-runner
targetRevision: 0.2.2
helm:
releaseName: act-runner
values: |
image:
name: vegardit/gitea-act-runner
tag: "dind-0.2.13"
repository: m.daocloud.io/docker.io
runner:
instanceURL: https://192.168.100.125:30300
token:
fromSecret:
name: "act-runner-secret"
key: "act-runner-token"
config:
enabled: true
data: |
log:
level: info
runner:
labels:
- ubuntu-latest:docker://m.daocloud.io/docker.gitea.com/runner-images:ubuntu-latest
container:
force_pull: true
persistence:
enabled: true
storageClassName: ""
accessModes: ReadWriteOnce
size: 10Gi
autoscaling:
enabled: true
minReplicas: 1
maxReplicas: 3
replicas: 1
securityContext:
privileged: true
runAsUser: 0
runAsGroup: 0
fsGroup: 0
capabilities:
add: ["NET_ADMIN", "SYS_ADMIN"]
podSecurityContext:
runAsUser: 0
runAsGroup: 0
fsGroup: 0
resources:
requests:
cpu: 200m
memory: 512Mi
limits:
cpu: 1000m
memory: 2048Mi
destination:
server: https://kubernetes.default.svc
namespace: application
EOF
4.sync by argocd
Details
argocd app sync argocd/act-runner
5.use action
Preliminary
1. Podman has installed, and the `podman` command is available in your PATH.
1.prepare data and config dir
Details
mkdir -p /opt/gitea_act_runner/{data,config} \
&& chown -R 1000:1000 /opt/gitea_act_runner \
&& chmod -R 755 /opt/gitea_act_runner
2.run container
Details
podman run -it \
--name gitea_act_runner \
--rm \
--privileged \
--network=host \
-v /opt/gitea_act_runner/data:/data \
-v /opt/gitea_act_runner/config:/config \
-v /var/run/podman/podman.sock:/var/run/docker.sock \
-e GITEA_INSTANCE_URL="http://10.200.60.64:30300" \
-e GITEA_RUNNER_REGISTRATION_TOKEN="5lgsrOzfKz3RiqeMWxxUb9RmUPEWNnZ6hTTZV0DL" \
m.daocloud.io/docker.io/gitea/act_runner:latest-dind-rootless
Using Mirror
you can use an additional DaoCloud mirror to accelerate image pulling, check Daocloud Proxy
Preliminary
1. Docker has installed, and the `docker` command is available in your PATH.
1.prepare data and config dir
Details
mkdir -p /opt/gitea_act_runner/{data,config} \
&& chown -R 1000:1000 /opt/gitea_act_runner \
&& chmod -R 755 /opt/gitea_act_runner
2.run container
Details
docker run -it \
--name gitea_act_runner \
--rm \
--privileged \
--network=host \
-v /opt/gitea_act_runner/data:/data \
-v /opt/gitea_act_runner/config:/config \
-e GITEA_INSTANCE_URL="http://192.168.100.125:30300" \
-e GITEA_RUNNER_REGISTRATION_TOKEN="5lgsrOzfKz3RiqeMWxxUb9RmUPEWNnZ6hTTZV0DL" \
m.daocloud.io/docker.io/gitea/act_runner:latest-dind
Using Mirror
you can use an additional DaoCloud mirror to accelerate image pulling, check Daocloud Proxy
FAQ
Install Gitea
Installation
Preliminary
1. Kubernetes has installed, if not check 🔗link
2. Helm binary has installed, if not check 🔗link
3. CertManager has installed, if not check 🔗link
4. Ingress has installed, if not check 🔗link
1.get helm repo
Details
helm repo add gitea-charts https://dl.gitea.com/charts/
helm repo update
2.install chart
Details
helm install gitea-charts/gitea --generate-name
Using Mirror
helm repo add ay-helm-mirror https://aaronyang0628.github.io/helm-chart-mirror/charts \
&& helm install ay-helm-mirror/gitea --generate-name --version 12.1.3
for more information, you can check 🔗https://aaronyang0628.github.io/helm-chart-mirror/
Preliminary
1. Kubernetes has installed, if not check 🔗link
2. ArgoCD has installed, if not check 🔗link
3. Helm binary has installed, if not check 🔗link
4. Ingress has installed on argoCD, if not check 🔗link
5. Minio has installed, if not check 🔗link
1.prepare `gitea-admin-credentials`
kubectl get namespaces application > /dev/null 2>&1 || kubectl create namespace application
kubectl -n application create secret generic gitea-admin-credentials \
--from-literal=username=gitea_admin \
--from-literal=password=$(tr -dc A-Za-z0-9 </dev/urandom | head -c 16)
2.prepare `gitea.yaml`
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: gitea
spec:
syncPolicy:
syncOptions:
- CreateNamespace=true
project: default
source:
repoURL: https://dl.gitea.com/charts/
chart: gitea
targetRevision: 10.1.4
helm:
releaseName: gitea
values: |
image:
registry: m.daocloud.io/docker.io
service:
http:
type: NodePort
port: 3000
nodePort: 30300
ssh:
type: NodePort
port: 22
nodePort: 32022
ingress:
enabled: true
ingressClassName: nginx
annotations:
kubernetes.io/ingress.class: nginx
nginx.ingress.kubernetes.io/rewrite-target: /$1
cert-manager.io/cluster-issuer: self-signed-ca-issuer
hosts:
- host: gitea.ay.dev
paths:
- path: /?(.*)
pathType: ImplementationSpecific
tls:
- secretName: gitea.ay.dev-tls
hosts:
- gitea.ay.dev
persistence:
enabled: true
size: 8Gi
storageClass: ""
redis-cluster:
enabled: false
postgresql-ha:
enabled: false
postgresql:
enabled: true
architecture: standalone
image:
registry: m.daocloud.io/docker.io
primary:
persistence:
enabled: false
storageClass: ""
size: 8Gi
readReplicas:
replicaCount: 1
persistence:
enabled: true
storageClass: ""
size: 8Gi
backup:
enabled: false
volumePermissions:
enabled: false
image:
registry: m.daocloud.io/docker.io
metrics:
enabled: false
image:
registry: m.daocloud.io/docker.io
gitea:
admin:
existingSecret: gitea-admin-credentials
email: aaron19940628@gmail.com
config:
database:
DB_TYPE: postgres
session:
PROVIDER: db
cache:
ADAPTER: memory
queue:
TYPE: level
indexer:
ISSUE_INDEXER_TYPE: bleve
REPO_INDEXER_ENABLED: true
repository:
MAX_CREATION_LIMIT: 10
DISABLED_REPO_UNITS: "repo.wiki,repo.ext_wiki,repo.projects"
DEFAULT_REPO_UNITS: "repo.code,repo.releases,repo.issues,repo.pulls"
server:
PROTOCOL: http
LANDING_PAGE: login
DOMAIN: gitea.ay.dev
ROOT_URL: https://gitea.ay.dev:32443/
SSH_DOMAIN: ssh.gitea.ay.dev
SSH_PORT: 32022
SSH_AUTHORIZED_PRINCIPALS_ALLOW: email
admin:
DISABLE_REGULAR_ORG_CREATION: true
security:
INSTALL_LOCK: true
service:
REGISTER_EMAIL_CONFIRM: true
DISABLE_REGISTRATION: true
ENABLE_NOTIFY_MAIL: false
DEFAULT_ALLOW_CREATE_ORGANIZATION: false
SHOW_MILESTONES_DASHBOARD_PAGE: false
migrations:
ALLOW_LOCALNETWORKS: true
mailer:
ENABLED: false
i18n:
LANGS: "en-US,zh-CN"
NAMES: "English,简体中文"
oauth2:
ENABLE: false
destination:
server: https://kubernetes.default.svc
namespace: application
3.apply to k8s
Details
kubectl -n argocd apply -f gitea.yaml
4.sync by argocd
Details
argocd app sync argocd/gitea
5.decode admin password
login 🔗https://gitea.ay.dev:32443/ with user gitea_admin and the password decoded below
Details
kubectl -n application get secret gitea-admin-credentials -o jsonpath='{.data.password}' | base64 -d
FAQ
Install GitLab
HPC
Monitor
Subsections of Monitor
Install Homepage
Official Documentation: https://gethomepage.dev/
Installation
Preliminary
1. Kubernetes has installed, if not check 🔗link
2. Helm has installed, if not check 🔗link
1.install chart directly
Details
helm install homepage oci://ghcr.io/m0nsterrr/helm-charts/homepage
2.you can modify the values.yaml and re-install
Details
helm install homepage oci://ghcr.io/m0nsterrr/helm-charts/homepage -f homepage.values.yaml
Using Mirror
helm repo add ay-helm-mirror https://aaronyang0628.github.io/helm-chart-mirror/charts \
&& helm install ay-helm-mirror/homepage --generate-name --version 4.2.0
for more information, you can check 🔗https://aaronyang0628.github.io/helm-chart-mirror/
Preliminary
1. Kubernetes has installed, if not check 🔗link
2. ArgoCD has installed, if not check 🔗link
3. Helm binary has installed, if not check 🔗link
4. Ingress has installed on argoCD, if not check 🔗link
1.prepare `homepage.yaml`
Details
kubectl -n argocd apply -f - << EOF
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: homepage
spec:
syncPolicy:
syncOptions:
- CreateNamespace=true
- ServerSideApply=true
project: default
source:
repoURL: oci://ghcr.io/m0nsterrr/helm-charts/homepage
chart: homepage
targetRevision: 4.2.0
helm:
releaseName: homepage
values: |
image:
registry: m.daocloud.io/ghcr.io
repository: gethomepage/homepage
pullPolicy: IfNotPresent
tag: "v1.5.0"
config:
allowedHosts:
- "home.72602.online"
ingress:
enabled: true
ingressClassName: "nginx"
annotations:
kubernetes.io/ingress.class: nginx
hosts:
- host: home.72602.online
paths:
- path: /
pathType: ImplementationSpecific
resources:
limits:
cpu: 500m
memory: 512Mi
requests:
cpu: 100m
memory: 128Mi
destination:
server: https://kubernetes.default.svc
namespace: monitor
EOF
3.sync by argocd
Details
argocd app sync argocd/homepage
5.check the web browser
Details
K8S_MASTER_IP=$(kubectl get nodes --selector=node-role.kubernetes.io/control-plane -o jsonpath='{$.items[0].status.addresses[?(@.type=="InternalIP")].address}')
echo "$K8S_MASTER_IP home.72602.online" >> /etc/hostsPreliminary
1. Kubernetes has installed, if not check 🔗link
2. Docker has installed, if not check 🔗link
docker run -d \
--name homepage \
-e HOMEPAGE_ALLOWED_HOSTS=47.110.67.161:3000 \
-e PUID=1000 \
-e PGID=1000 \
-p 3000:3000 \
-v /root/home-site/static/icons:/app/public/icons \
-v /root/home-site/content/Ops/HomePage/config:/app/config \
-v /var/run/docker.sock:/var/run/docker.sock:ro \
--restart unless-stopped \
ghcr.io/gethomepage/homepage:v1.5.0
Preliminary
1. Kubernetes has installed, if not check 🔗link
2. Podman has installed, if not check 🔗link
podman run -d \
--name homepage \
-e HOMEPAGE_ALLOWED_HOSTS=127.0.0.1:3000 \
-e PUID=1000 \
-e PGID=1000 \
-p 3000:3000 \
-v /root/home-site/static/icons:/app/public/icons \
-v /root/home-site/content/Ops/HomePage/config:/app/config \
--restart unless-stopped \
ghcr.io/gethomepage/homepage:v1.5.0
FAQ
Install Prometheus Stack
Installation
Preliminary
1. Kubernetes has installed, if not check 🔗link
2. Helm has installed, if not check 🔗link
1.get helm repo
Details
helm repo add ay-helm-mirror https://aaronyang0628.github.io/helm-chart-mirror/charts
helm repo update
2.install chart
Details
helm install ay-helm-mirror/kube-prometheus-stack --generate-name
Using Mirror
helm repo add ay-helm-mirror https://aaronyang0628.github.io/helm-chart-mirror/charts \
&& helm install ay-helm-mirror/kube-prometheus-stack --generate-name --version 1.17.2
for more information, you can check 🔗https://aaronyang0628.github.io/helm-chart-mirror/
Preliminary
1. Kubernetes has installed, if not check 🔗link
2. ArgoCD has installed, if not check 🔗link
3. Helm binary has installed, if not check 🔗link
4. Ingress has installed on argoCD, if not check 🔗link
1.prepare `prometheus-stack-credentials`
Details
kubectl get namespaces monitor > /dev/null 2>&1 || kubectl create namespace monitor
kubectl -n monitor create secret generic prometheus-stack-credentials \
--from-literal=grafana-username=admin \
--from-literal=grafana-password=$(tr -dc A-Za-z0-9 </dev/urandom | head -c 16)
2.prepare `prometheus-stack.yaml`
Details
kubectl -n argocd apply -f - << EOF
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: prometheus-stack
spec:
syncPolicy:
syncOptions:
- CreateNamespace=true
- ServerSideApply=true
project: default
source:
repoURL: https://aaronyang0628.github.io/helm-chart-mirror/charts
chart: kube-prometheus-stack
targetRevision: 72.9.1
helm:
releaseName: prometheus-stack
values: |
crds:
enabled: true
global:
rbac:
create: true
imageRegistry: ""
imagePullSecrets: []
alertmanager:
enabled: true
ingress:
enabled: false
serviceMonitor:
selfMonitor: true
interval: ""
alertmanagerSpec:
image:
registry: m.daocloud.io/quay.io
repository: prometheus/alertmanager
tag: v0.28.1
replicas: 1
resources: {}
storage:
volumeClaimTemplate:
spec:
storageClassName: ""
accessModes: ["ReadWriteOnce"]
resources:
requests:
storage: 2Gi
grafana:
enabled: true
ingress:
enabled: true
annotations:
cert-manager.io/clusterissuer: self-signed-issuer
kubernetes.io/ingress.class: nginx
hosts:
- grafana.dev.tech
path: /
pathtype: ImplementationSpecific
tls:
- secretName: grafana.dev.tech-tls
hosts:
- grafana.dev.tech
prometheusOperator:
admissionWebhooks:
patch:
resources: {}
image:
registry: m.daocloud.io/registry.k8s.io
repository: ingress-nginx/kube-webhook-certgen
tag: v1.5.3
image:
registry: m.daocloud.io/quay.io
repository: prometheus-operator/prometheus-operator
prometheusConfigReloader:
image:
registry: m.daocloud.io/quay.io
repository: prometheus-operator/prometheus-config-reloader
resources: {}
thanosImage:
registry: m.daocloud.io/quay.io
repository: thanos/thanos
tag: v0.38.0
prometheus:
enabled: true
ingress:
enabled: true
annotations:
cert-manager.io/clusterissuer: self-signed-issuer
kubernetes.io/ingress.class: nginx
hosts:
- prometheus.dev.tech
path: /
pathtype: ImplementationSpecific
tls:
- secretName: prometheus.dev.tech-tls
hosts:
- prometheus.dev.tech
prometheusSpec:
image:
registry: m.daocloud.io/quay.io
repository: prometheus/prometheus
tag: v3.4.0
replicas: 1
shards: 1
resources: {}
storageSpec:
volumeClaimTemplate:
spec:
storageClassName: ""
accessModes: ["ReadWriteOnce"]
resources:
requests:
storage: 2Gi
thanosRuler:
enabled: false
ingress:
enabled: false
thanosRulerSpec:
replicas: 1
storage: {}
resources: {}
image:
registry: m.daocloud.io/quay.io
repository: thanos/thanos
tag: v0.38.0
destination:
server: https://kubernetes.default.svc
namespace: monitor
EOF
3.sync by argocd
Details
argocd app sync argocd/prometheus-stack
4.extract grafana admin credentials
Details
kubectl -n monitor get secret prometheus-stack-credentials -o jsonpath='{.data.grafana-password}' | base64 -d
5.check the web browser
Details
> add `$K8S_MASTER_IP grafana.dev.tech` to **/etc/hosts**
> add `$K8S_MASTER_IP prometheus.dev.tech` to **/etc/hosts**
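A sketch of those two edits, following the same pattern as the Homepage section above:
K8S_MASTER_IP=$(kubectl get nodes --selector=node-role.kubernetes.io/control-plane -o jsonpath='{$.items[0].status.addresses[?(@.type=="InternalIP")].address}')
echo "$K8S_MASTER_IP grafana.dev.tech" >> /etc/hosts
echo "$K8S_MASTER_IP prometheus.dev.tech" >> /etc/hosts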
echo "start from head is important"FAQ
Networking
Subsections of Networking
Install Cert Manager
Installation
Preliminary
1. Kubernetes has installed, if not check 🔗link
2. Helm binary has installed, if not check 🔗link
1.get helm repo
Details
helm repo add cert-manager-repo https://charts.jetstack.io
helm repo update
2.install chart
Details
helm install cert-manager-repo/cert-manager --generate-name --version 1.17.2
Using Mirror
helm repo add ay-helm-mirror https://aaronyang0628.github.io/helm-chart-mirror/charts \
&& helm install ay-helm-mirror/cert-manager --generate-name --version 1.17.2
for more information, you can check 🔗https://aaronyang0628.github.io/helm-chart-mirror/
Preliminary
1. Kubernetes has installed, if not check 🔗link
2. ArgoCD has installed, if not check 🔗link
3. Helm binary has installed, if not check 🔗link
1.prepare `cert-manager.yaml`
Details
kubectl -n argocd apply -f - << EOF
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: cert-manager
spec:
syncPolicy:
syncOptions:
- CreateNamespace=true
project: default
source:
repoURL: https://aaronyang0628.github.io/helm-chart-mirror/charts
chart: cert-manager
targetRevision: 1.17.2
helm:
releaseName: cert-manager
values: |
installCRDs: true
image:
repository: m.daocloud.io/quay.io/jetstack/cert-manager-controller
tag: v1.17.2
webhook:
image:
repository: m.daocloud.io/quay.io/jetstack/cert-manager-webhook
tag: v1.17.2
cainjector:
image:
repository: m.daocloud.io/quay.io/jetstack/cert-manager-cainjector
tag: v1.17.2
acmesolver:
image:
repository: m.daocloud.io/quay.io/jetstack/cert-manager-acmesolver
tag: v1.17.2
startupapicheck:
image:
repository: m.daocloud.io/quay.io/jetstack/cert-manager-startupapicheck
tag: v1.17.2
destination:
server: https://kubernetes.default.svc
namespace: basic-components
EOF
3.sync by argocd
Details
argocd app sync argocd/cert-manager
4.prepare self-signed.yaml
Details
kubectl apply -f - <<EOF
---
apiVersion: cert-manager.io/v1
kind: Issuer
metadata:
namespace: basic-components
name: self-signed-issuer
spec:
selfSigned: {}
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
namespace: basic-components
name: my-self-signed-ca
spec:
isCA: true
commonName: my-self-signed-ca
secretName: root-secret
privateKey:
algorithm: ECDSA
size: 256
issuerRef:
name: self-signed-issuer
kind: Issuer
group: cert-manager.io
---
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
name: self-signed-ca-issuer
spec:
ca:
secretName: root-secret
EOF
Preliminary
1. Docker|Podman|Buildah has installed, if not check 🔗link
1.just run
Details
docker run --name cert-manager -e ALLOW_EMPTY_PASSWORD=yes bitnami/cert-manager:latest
Using Proxy
you can use an additional DaoCloud mirror to accelerate image pulling, check Daocloud Proxy
docker run --name cert-manager \
-e ALLOW_EMPTY_PASSWORD=yes \
m.daocloud.io/docker.io/bitnami/cert-manager:latest
Preliminary
1. Kubernetes has installed, if not check 🔗link
1.just run
Details
kubectl create -f https://github.com/jetstack/cert-manager/releases/download/v1.17.2/cert-manager.yaml
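To verify the install, check that the controller pods come up; a quick sketch assuming the default cert-manager namespace created by that manifest:
kubectl -n cert-manager get pods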
FAQ
Install HAProxy
Install Ingress
Installation
Preliminary
1. Kubernetes has installed, if not check 🔗link
2. Helm has installed, if not check 🔗link
1.get helm repo
Details
helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx
helm repo update
2.install chart
Details
helm install ingress-nginx/ingress-nginx --generate-name
Using Mirror
helm repo add ay-helm-mirror https://aaronyang0628.github.io/helm-chart-mirror/charts &&
helm install ay-helm-mirror/ingress-nginx --generate-name --version 4.11.3
for more information, you can check 🔗https://aaronyang0628.github.io/helm-chart-mirror/
Preliminary
1. Kubernetes has installed, if not check 🔗link
2. argoCD has installed, if not check 🔗link
1.prepare `ingress-nginx.yaml`
Details
kubectl -n argocd apply -f - <<EOF
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: ingress-nginx
spec:
syncPolicy:
syncOptions:
- CreateNamespace=true
project: default
source:
repoURL: https://kubernetes.github.io/ingress-nginx
chart: ingress-nginx
targetRevision: 4.12.3
helm:
releaseName: ingress-nginx
values: |
controller:
image:
registry: m.daocloud.io/registry.k8s.io
service:
enabled: true
type: NodePort
nodePorts:
http: 32080
https: 32443
tcp:
8080: 32808
resources:
requests:
cpu: 100m
memory: 128Mi
admissionWebhooks:
enabled: true
patch:
enabled: true
image:
registry: m.daocloud.io/registry.k8s.io
metrics:
enabled: false
defaultBackend:
enabled: false
image:
registry: m.daocloud.io/registry.k8s.io
destination:
server: https://kubernetes.default.svc
namespace: basic-components
EOF
[Optional] 2.apply to k8s
Details
kubectl -n argocd apply -f ingress-nginx.yaml
3.sync by argocd
Details
argocd app sync argocd/ingress-nginx
FAQ
Install Istio
Installation
Preliminary
1. Kubernetes has installed, if not check 🔗link
2. Helm has installed, if not check 🔗link
1.get helm repo
Details
helm repo add ay-helm-mirror https://aaronyang0628.github.io/helm-chart-mirror/charts
helm repo update
2.install chart
Details
helm install ay-helm-mirror/kube-prometheus-stack --generate-name
Using Proxy
for more information, you can check 🔗https://artifacthub.io/packages/helm/prometheus-community/prometheus
Preliminary
1. Kubernetes has installed, if not check 🔗link
2. Helm has installed, if not check 🔗link
3. ArgoCD has installed, if not check 🔗link
1.prepare `deploy-istio-base.yaml`
Details
kubectl -n argocd apply -f - << EOF
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: istio-base
spec:
syncPolicy:
syncOptions:
- CreateNamespace=true
project: default
source:
repoURL: https://istio-release.storage.googleapis.com/charts
chart: base
targetRevision: 1.23.2
helm:
releaseName: istio-base
values: |
defaults:
global:
istioNamespace: istio-system
base:
enableCRDTemplates: false
enableIstioConfigCRDs: true
defaultRevision: "default"
destination:
server: https://kubernetes.default.svc
namespace: istio-system
EOF
2.sync by argocd
Details
argocd app sync argocd/istio-base
3.prepare `deploy-istiod.yaml`
Details
kubectl -n argocd apply -f - << EOF
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: istiod
spec:
syncPolicy:
syncOptions:
- CreateNamespace=true
project: default
source:
repoURL: https://istio-release.storage.googleapis.com/charts
chart: istiod
targetRevision: 1.23.2
helm:
releaseName: istiod
values: |
defaults:
global:
istioNamespace: istio-system
defaultResources:
requests:
cpu: 10m
memory: 128Mi
limits:
cpu: 100m
memory: 128Mi
hub: m.daocloud.io/docker.io/istio
proxy:
autoInject: disabled
resources:
requests:
cpu: 100m
memory: 128Mi
limits:
cpu: 2000m
memory: 1024Mi
pilot:
autoscaleEnabled: true
resources:
requests:
cpu: 500m
memory: 2048Mi
cpu:
targetAverageUtilization: 80
podAnnotations:
cluster-autoscaler.kubernetes.io/safe-to-evict: "true"
destination:
server: https://kubernetes.default.svc
namespace: istio-system
EOF
4.sync by argocd
Details
argocd app sync argocd/istiod
5.prepare `deploy-istio-ingressgateway.yaml`
Details
kubectl -n argocd apply -f - << EOF
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: istio-ingressgateway
spec:
syncPolicy:
syncOptions:
- CreateNamespace=true
project: default
source:
repoURL: https://istio-release.storage.googleapis.com/charts
chart: gateway
targetRevision: 1.23.2
helm:
releaseName: istio-ingressgateway
values: |
defaults:
replicaCount: 1
podAnnotations:
inject.istio.io/templates: "gateway"
sidecar.istio.io/inject: "true"
cluster-autoscaler.kubernetes.io/safe-to-evict: "true"
resources:
requests:
cpu: 100m
memory: 128Mi
limits:
cpu: 2000m
memory: 1024Mi
service:
type: LoadBalancer
ports:
- name: status-port
port: 15021
protocol: TCP
targetPort: 15021
- name: http2
port: 80
protocol: TCP
targetPort: 80
- name: https
port: 443
protocol: TCP
targetPort: 443
autoscaling:
enabled: true
minReplicas: 1
maxReplicas: 5
destination:
server: https://kubernetes.default.svc
namespace: istio-system
EOF
6.sync by argocd
Details
argocd app sync argocd/istio-ingressgateway
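Once all three applications are synced, a quick check of the control plane and gateway (pod and service names below assume the release names used above):
kubectl -n istio-system get pods
kubectl -n istio-system get svc istio-ingressgateway
# istiod and istio-ingressgateway pods should be Running, and the gateway service should show a LoadBalancer (possibly pending) external IP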
Preliminary
1. Kubernetes has been installed, if not check 🔗link
2. Helm has been installed, if not check 🔗link
3. ArgoCD has been installed, if not check 🔗link
4. Argo Workflow has been installed, if not check 🔗link
1.prepare `argocd-login-credentials`
Details
kubectl get namespaces database > /dev/null 2>&1 || kubectl create namespace database
2.apply rolebinding to k8s
Details
kubectl apply -f - <<EOF
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: application-administrator
rules:
- apiGroups:
- argoproj.io
resources:
- applications
verbs:
- '*'
- apiGroups:
- apps
resources:
- deployments
verbs:
- '*'
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: application-administration
namespace: argocd
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: application-administrator
subjects:
- kind: ServiceAccount
name: argo-workflow
namespace: business-workflows
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: application-administration
namespace: application
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: application-administrator
subjects:
- kind: ServiceAccount
name: argo-workflow
namespace: business-workflows
EOF
4.prepare `deploy-xxxx-flow.yaml`
Details
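The flow file itself is left as a placeholder in this guide; below is only a minimal sketch of the expected shape, an Argo Workflow whose single step logs into ArgoCD and syncs the target application. The container image, the secret key name and the application name (`xxxx`) are assumptions, not values defined elsewhere here:
apiVersion: argoproj.io/v1alpha1
kind: Workflow
metadata:
  generateName: deploy-xxxx-flow-
spec:
  serviceAccountName: argo-workflow
  entrypoint: deploy
  templates:
    - name: deploy
      container:
        image: quay.io/argoproj/argocd:latest
        command: [bash, -c]
        args:
          - |
            # log in with the credentials prepared in step 1, then sync the application
            argocd login argocd-server.argocd --username admin --password "${ARGOCD_PASSWORD}" --insecure
            argocd app sync argocd/xxxx
        env:
          - name: ARGOCD_PASSWORD
            valueFrom:
              secretKeyRef:
                name: argocd-login-credentials
                key: password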
6.submit to argo workflow client
Details
argo -n business-workflows submit deploy-xxxx-flow.yaml
7.decode password
Details
kubectl -n application get secret xxxx-credentials -o jsonpath='{.data.xxx-password}' | base64 -d
FAQ
Install Nginx
1. prepare server.conf
cat << EOF > default.conf
server {
listen 80;
location / {
root /usr/share/nginx/html;
autoindex on;
}
}
EOF
2. install
mkdir $(pwd)/data
podman run --rm -p 8080:80 \
-v $(pwd)/data:/usr/share/nginx/html:ro \
-v $(pwd)/default.conf:/etc/nginx/conf.d/default.conf:ro \
-d docker.io/library/nginx:1.19.9-alpine
echo 'this is a test' > $(pwd)/data/some-data.txt
Tip
you can run an additional daocloud image to accelerate your pulling, check Daocloud Proxy
visit http://localhost:8080
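You can also verify from the command line (assuming the container from step 2 is still running):
curl http://localhost:8080/some-data.txt
# should print: this is a test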
RPC
Subsections of RPC
gRpc
This guide gets you started with gRPC in C++ with a simple working example.
In the C++ world, there’s no universally accepted standard for managing project dependencies. You need to build and install gRPC before building and running this quick start’s Hello World example.
Build and locally install gRPC and Protocol Buffers. The steps in the section explain how to build and locally install gRPC and Protocol Buffers using cmake. If you’d rather use bazel, see Building from source.
1. Setup
Choose a directory to hold locally installed packages. This page assumes that the environment variable MY_INSTALL_DIR holds this directory path. For example:
export MY_INSTALL_DIR=$HOME/.local
Ensure that the directory exists:
mkdir -p $MY_INSTALL_DIR
Add the local bin folder to your path variable, for example:
export PATH="$MY_INSTALL_DIR/bin:$PATH"
Important
We strongly encourage you to install gRPC locally — using an appropriately set CMAKE_INSTALL_PREFIX
— because there is no easy way to uninstall gRPC after you’ve installed it globally.
2. Install Essentials
2.1 Install Cmake
You need version 3.13 or later of cmake. Install it by following these instructions:
sudo apt install -y cmake
brew install cmake
2.2 Install basic tools required to build gRPC
sudo apt install -y build-essential autoconf libtool pkg-config
brew install autoconf automake libtool pkg-config
2.3 Clone the grpc repo
Clone the grpc repo and its submodules:
git clone --recurse-submodules -b v1.62.0 --depth 1 --shallow-submodules https://github.com/grpc/grpc
2.4 Build and install gRPC and Protocol Buffers
While not mandatory, gRPC applications usually leverage Protocol Buffers for service definitions and data serialization, and the example code uses proto3.
The following commands build and locally install gRPC and Protocol Buffers:
cd grpc
mkdir -p cmake/build
pushd cmake/build
cmake -DgRPC_INSTALL=ON \
-DgRPC_BUILD_TESTS=OFF \
-DCMAKE_INSTALL_PREFIX=$MY_INSTALL_DIR \
../..
make -j 4
make install
popd
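Before moving on, it can be worth confirming that the toolchain actually landed under MY_INSTALL_DIR (a quick sanity check, not part of the upstream quick start):
which protoc grpc_cpp_plugin
protoc --version
# both binaries should resolve under $MY_INSTALL_DIR/bin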
3. Run the example
The example code is part of the grpc repo source, which you cloned as part of the steps of the previous section.
3.1 change the example’s directory:
cd examples/cpp/helloworld
3.2 build the example project by using cmake
make sure echo $MY_INSTALL_DIR still returns a valid path
mkdir -p cmake/build
pushd cmake/build
cmake -DCMAKE_PREFIX_PATH=$MY_INSTALL_DIR ../..
make -j 4
3.3 run the server
./greeter_server
3.4 from a different terminal, run the client and see the client output:
./greeter_client
and the result should be like this:
Greeter received: Hello world
Storage
Subsections of Storage
Deploy Artifict Repository
Preliminary
- Kubernetes has been installed, if not check link
- minio is ready for artifact repository
endpoint:
minio.storage:9000
Steps
1. prepare bucket for s3 artifact repository
# K8S_MASTER_IP could be you master ip or loadbalancer external ip
K8S_MASTER_IP=172.27.253.27
MINIO_ACCESS_SECRET=$(kubectl -n storage get secret minio-secret -o jsonpath='{.data.rootPassword}' | base64 -d)
podman run --rm \
--entrypoint bash \
--add-host=minio-api.dev.geekcity.tech:${K8S_MASTER_IP} \
-it docker.io/minio/mc:latest \
-c "mc alias set minio http://minio-api.dev.geekcity.tech admin ${MINIO_ACCESS_SECRET} \
&& mc ls minio \
&& mc mb --ignore-existing minio/argo-workflows-artifacts"
2. prepare secret s3-artifact-repository-credentials
the secret is created in the business-workflows namespace (create that namespace first if it does not exist)
MINIO_ACCESS_KEY=$(kubectl -n storage get secret minio-secret -o jsonpath='{.data.rootUser}' | base64 -d)
kubectl -n business-workflows create secret generic s3-artifact-repository-credentials \
--from-literal=accessKey=${MINIO_ACCESS_KEY} \
--from-literal=secretKey=${MINIO_ACCESS_SECRET}
3. prepare configMap artifact-repositories.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: artifact-repositories
annotations:
workflows.argoproj.io/default-artifact-repository: default-artifact-repository
data:
default-artifact-repository: |
s3:
endpoint: minio.storage:9000
insecure: true
accessKeySecret:
name: s3-artifact-repository-credentials
key: accessKey
secretKeySecret:
name: s3-artifact-repository-credentials
key: secretKey
bucket: argo-workflows-artifacts
4. apply artifact-repositories.yaml to k8s
kubectl -n business-workflows apply -f artifact-repositories.yaml
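To confirm the default artifact repository will be picked up, you can inspect the ConfigMap and its workflows.argoproj.io/default-artifact-repository annotation:
kubectl -n business-workflows get configmap artifact-repositories -o yaml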
Install Chart Museum
Installation
Preliminary
1. Kubernetes has been installed, if not check 🔗link
2. Helm binary has been installed, if not check 🔗link
1.get helm repo
Details
helm repo add ay-helm-mirror https://aaronyang0628.github.io/helm-chart-mirror/charts
helm repo update
2.install chart
Details
helm install ay-helm-mirror/kube-prometheus-stack --generate-name
Using Mirror
helm repo add ay-helm-mirror https://aaronyang0628.github.io/helm-chart-mirror/charts \
&& helm install ay-helm-mirror/cert-manager --generate-name --version 1.17.2
for more information, you can check 🔗https://aaronyang0628.github.io/helm-chart-mirror/
Preliminary
1. Kubernetes has been installed, if not check 🔗link
2. ArgoCD has been installed, if not check 🔗link
3. Helm binary has been installed, if not check 🔗link
4. Ingress has been installed on argoCD, if not check 🔗link
5. Minio has been installed, if not check 🔗link
1.prepare `chart-museum-credentials`
kubectl get namespaces basic-components > /dev/null 2>&1 || kubectl create namespace basic-components
kubectl -n basic-components create secret generic chart-museum-credentials \
--from-literal=username=admin \
--from-literal=password=$(tr -dc A-Za-z0-9 </dev/urandom | head -c 16)
kubectl get namespaces basic-components > /dev/null 2>&1 || kubectl create namespace basic-components
kubectl -n basic-components create secret generic chart-museum-credentials \
--from-literal=username=admin \
--from-literal=password=$(tr -dc A-Za-z0-9 </dev/urandom | head -c 16) \
--from-literal=aws_access_key_id=$(kubectl -n storage get secret minio-credentials -o jsonpath='{.data.rootUser}' | base64 -d) \
--from-literal=aws_secret_access_key=$(kubectl -n storage get secret minio-credentials -o jsonpath='{.data.rootPassword}' | base64 -d)
2.prepare `chart-museum.yaml`
kubectl apply -f - << EOF
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: chart-museum
spec:
syncPolicy:
syncOptions:
- CreateNamespace=true
project: default
source:
repoURL: https://chartmuseum.github.io/charts
chart: chartmuseum
targetRevision: 3.10.3
helm:
releaseName: chart-museum
values: |
replicaCount: 1
image:
repository: m.daocloud.io/ghcr.io/helm/chartmuseum
env:
open:
DISABLE_API: false
STORAGE: local
AUTH_ANONYMOUS_GET: true
existingSecret: "chart-museum-credentials"
existingSecretMappings:
BASIC_AUTH_USER: "username"
BASIC_AUTH_PASS: "password"
persistence:
enabled: false
storageClass: ""
volumePermissions:
image:
registry: m.daocloud.io/docker.io
ingress:
enabled: true
ingressClassName: nginx
annotations:
cert-manager.io/cluster-issuer: self-signed-ca-issuer
nginx.ingress.kubernetes.io/rewrite-target: /$1
hosts:
- name: chartmuseum.ay.dev
path: /?(.*)
tls: true
tlsSecret: chartmuseum.ay.dev-tls
destination:
server: https://kubernetes.default.svc
namespace: basic-components
EOF
kubectl apply -f - << EOF
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: chart-museum
spec:
syncPolicy:
syncOptions:
- CreateNamespace=true
project: default
source:
repoURL: https://chartmuseum.github.io/charts
chart: chartmuseum
targetRevision: 3.10.3
helm:
releaseName: chart-museum
values: |
replicaCount: 1
image:
repository: m.daocloud.io/ghcr.io/helm/chartmuseum
env:
open:
DISABLE_API: false
STORAGE: amazon
STORAGE_AMAZON_ENDPOINT: http://minio-api.ay.dev:32080
STORAGE_AMAZON_BUCKET: chart-museum
STORAGE_AMAZON_PREFIX: charts
STORAGE_AMAZON_REGION: us-east-1
AUTH_ANONYMOUS_GET: true
existingSecret: "chart-museum-credentials"
existingSecretMappings:
BASIC_AUTH_USER: "username"
BASIC_AUTH_PASS: "password"
AWS_ACCESS_KEY_ID: "aws_access_key_id"
AWS_SECRET_ACCESS_KEY: "aws_secret_access_key"
persistence:
enabled: false
storageClass: ""
volumePermissions:
image:
registry: m.daocloud.io/docker.io
ingress:
enabled: true
ingressClassName: nginx
annotations:
cert-manager.io/cluster-issuer: self-signed-ca-issuer
nginx.ingress.kubernetes.io/rewrite-target: /$1
hosts:
- name: chartmuseum.ay.dev
path: /?(.*)
tls: true
tlsSecret: chartmuseum.ay.dev-tls
destination:
server: https://kubernetes.default.svc
namespace: basic-components
EOF
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: chart-museum
spec:
syncPolicy:
syncOptions:
- CreateNamespace=true
project: default
source:
repoURL: https://chartmuseum.github.io/charts
chart: chartmuseum
targetRevision: 3.10.3
helm:
releaseName: chart-museum
values: |
replicaCount: 1
image:
repository: m.daocloud.io/ghcr.io/helm/chartmuseum
env:
open:
DISABLE_API: false
STORAGE: local
AUTH_ANONYMOUS_GET: true
existingSecret: "chart-museum-credentials"
existingSecretMappings:
BASIC_AUTH_USER: "username"
BASIC_AUTH_PASS: "password"
persistence:
enabled: false
storageClass: ""
volumePermissions:
image:
registry: m.daocloud.io/docker.io
ingress:
enabled: true
ingressClassName: nginx
annotations:
cert-manager.io/cluster-issuer: self-signed-ca-issuer
nginx.ingress.kubernetes.io/rewrite-target: /$1
hosts:
- name: chartmuseum.ay.dev
path: /?(.*)
tls: true
tlsSecret: chartmuseum.ay.dev-tls
destination:
server: https://kubernetes.default.svc
namespace: basic-components
3.sync by argocd
Details
argocd app sync argocd/chart-museum
Uploading a Chart Package
The commands below assume ChartMuseum is reachable at http://localhost:8080 (for example via a kubectl port-forward to the chart-museum service); substitute your ingress host if you exposed it that way.
First create mychart-0.1.0.tgz using the Helm CLI:
cd mychart/
helm package .
Upload mychart-0.1.0.tgz:
curl --data-binary "@mychart-0.1.0.tgz" http://localhost:8080/api/charts
If you’ve signed your package and generated a provenance file, upload it with:
curl --data-binary "@mychart-0.1.0.tgz.prov" http://localhost:8080/api/prov
Both files can also be uploaded at once (or one at a time) on the /api/charts route using the multipart/form-data format:
curl -F "chart=@mychart-0.1.0.tgz" -F "prov=@mychart-0.1.0.tgz.prov" http://localhost:8080/api/charts
You can also use the helm-push plugin:
helm cm-push mychart/ chartmuseum
Installing Charts into Kubernetes
Add the URL to your ChartMuseum installation to the local repository list:
helm repo add chartmuseum http://localhost:8080
Search for charts:
helm search repo chartmuseum/
Install chart:
helm install chartmuseum/mychart --generate-name
FAQ
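If uploads are rejected with 401 (only anonymous GET is open in the values above), pass the basic-auth credentials created earlier; a sketch, assuming the ingress host chartmuseum.ay.dev with its self-signed certificate:
CM_USER=$(kubectl -n basic-components get secret chart-museum-credentials -o jsonpath='{.data.username}' | base64 -d)
CM_PASS=$(kubectl -n basic-components get secret chart-museum-credentials -o jsonpath='{.data.password}' | base64 -d)
curl -k -u "${CM_USER}:${CM_PASS}" --data-binary "@mychart-0.1.0.tgz" https://chartmuseum.ay.dev/api/charts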
Install Harbor
Install JuiceFS
Install Minio
Installation
Preliminary
1. Kubernetes has been installed, if not check 🔗link
2. ArgoCD has been installed, if not check 🔗link
3. Ingress has been installed on argoCD, if not check 🔗link
4. Cert-manager has been installed on argoCD and a cluster issuer named `self-signed-ca-issuer` exists, if not check 🔗link
1.prepare minio credentials secret
Details
kubectl get namespaces storage > /dev/null 2>&1 || kubectl create namespace storage
kubectl -n storage create secret generic minio-secret \
--from-literal=root-user=admin \
--from-literal=root-password=$(tr -dc A-Za-z0-9 </dev/urandom | head -c 16)
2.prepare `deploy-minio.yaml`
Details
kubectl -n argocd apply -f - << EOF
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: minio
spec:
syncPolicy:
syncOptions:
- CreateNamespace=true
project: default
source:
repoURL: https://aaronyang0628.github.io/helm-chart-mirror/charts
chart: minio
targetRevision: 16.0.10
helm:
releaseName: minio
values: |
global:
imageRegistry: "m.daocloud.io/docker.io"
imagePullSecrets: []
storageClass: ""
security:
allowInsecureImages: true
compatibility:
openshift:
adaptSecurityContext: auto
image:
registry: m.daocloud.io/docker.io
repository: bitnami/minio
clientImage:
registry: m.daocloud.io/docker.io
repository: bitnami/minio-client
mode: standalone
defaultBuckets: ""
auth:
# rootUser: admin
# rootPassword: ""
existingSecret: "minio-secret"
statefulset:
updateStrategy:
type: RollingUpdate
podManagementPolicy: Parallel
replicaCount: 1
zones: 1
drivesPerNode: 1
resourcesPreset: "micro"
resources:
requests:
memory: 512Mi
cpu: 250m
limits:
memory: 512Mi
cpu: 250m
ingress:
enabled: true
ingressClassName: "nginx"
hostname: minio-console.ay.online
path: /?(.*)
pathType: ImplementationSpecific
annotations:
kubernetes.io/ingress.class: nginx
nginx.ingress.kubernetes.io/rewrite-target: /$1
cert-manager.io/cluster-issuer: self-signed-ca-issuer
tls: true
selfSigned: true
extraHosts: []
apiIngress:
enabled: true
ingressClassName: "nginx"
hostname: minio-api.ay.online
path: /?(.*)
pathType: ImplementationSpecific
annotations:
kubernetes.io/ingress.class: nginx
nginx.ingress.kubernetes.io/rewrite-target: /$1
cert-manager.io/cluster-issuer: self-signed-ca-issuer
tls: true
selfSigned: true
extraHosts: []
persistence:
enabled: false
storageClass: ""
mountPath: /bitnami/minio/data
accessModes:
- ReadWriteOnce
size: 8Gi
annotations: {}
existingClaim: ""
metrics:
prometheusAuthType: public
enabled: false
serviceMonitor:
enabled: false
namespace: ""
labels: {}
jobLabel: ""
paths:
- /minio/v2/metrics/cluster
- /minio/v2/metrics/node
interval: 30s
scrapeTimeout: ""
honorLabels: false
prometheusRule:
enabled: false
namespace: ""
additionalLabels: {}
rules: []
destination:
server: https://kubernetes.default.svc
namespace: storage
EOF
3.sync by argocd
Details
argocd app sync argocd/minio
4.decode minio secret
Details
kubectl -n storage get secret minio-secret -o jsonpath='{.data.root-password}' | base64 -d
5.visit web console
Login Credentials
add $K8S_MASTER_IP minio-console.ay.online to /etc/hosts
address: 🔗http://minio-console.ay.online:32080/login
access key: admin
secret key: the password decoded in step 4
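For example (a convenience sketch, assuming a single control-plane node and that you can edit /etc/hosts):
K8S_MASTER_IP=$(kubectl get node -l node-role.kubernetes.io/control-plane -o jsonpath='{.items[0].status.addresses[?(@.type=="InternalIP")].address}')
echo "${K8S_MASTER_IP} minio-console.ay.online" | sudo tee -a /etc/hosts
kubectl -n storage get secret minio-secret -o jsonpath='{.data.root-password}' | base64 -d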
6.using mc
Details
K8S_MASTER_IP=$(kubectl get node -l node-role.kubernetes.io/control-plane -o jsonpath='{.items[0].status.addresses[?(@.type=="InternalIP")].address}')
MINIO_ACCESS_SECRET=$(kubectl -n storage get secret minio-secret -o jsonpath='{.data.root-password}' | base64 -d)
podman run --rm \
--entrypoint bash \
--add-host=minio-api.dev.tech:${K8S_MASTER_IP} \
-it m.daocloud.io/docker.io/minio/mc:latest \
-c "mc alias set minio http://minio-api.dev.tech:32080 admin ${MINIO_ACCESS_SECRET} \
&& mc ls minio \
&& mc mb --ignore-existing minio/test \
&& mc cp /etc/hosts minio/test/etc/hosts \
&& mc ls --recursive minio"
Details
K8S_MASTER_IP=$(kubectl get node -l node-role.kubernetes.io/control-plane -o jsonpath='{.items[0].status.addresses[?(@.type=="InternalIP")].address}')
MINIO_ACCESS_SECRET=$(kubectl -n storage get secret minio-secret -o jsonpath='{.data.root-password}' | base64 -d)
podman run --rm \
--entrypoint bash \
--add-host=minio-api.dev.tech:${K8S_MASTER_IP} \
-it m.daocloud.io/docker.io/minio/mc:latest
Preliminary
1. Docker has been installed, if not check 🔗link
Using Proxy
you can run an additional daocloud image to accelerate your pulling, check Daocloud Proxy
1.init server
Details
mkdir -p $(pwd)/minio/data
podman run --rm \
--name minio-server \
-p 9000:9000 \
-p 9001:9001 \
-v $(pwd)/minio/data:/data \
-d docker.io/minio/minio:latest server /data --console-address :9001
2.use web console
And then you can visit 🔗http://localhost:9001
username: `minioadmin`
password: `minioadmin`
3.use internal client
Details
podman run --rm \
--entrypoint bash \
-it docker.io/minio/mc:latest \
-c "mc alias set minio http://host.docker.internal:9000 minioadmin minioadmin \
&& mc ls minio \
&& mc mb --ignore-existing minio/test \
&& mc cp /etc/hosts minio/test/etc/hosts \
&& mc ls --recursive minio"
FAQ
Install NFS
Installation
Preliminary
1. Kubernetes has been installed, if not check 🔗link
2. argoCD has been installed, if not check 🔗link
3. Ingress has been installed on argoCD, if not check 🔗link
1.prepare `nfs-provisioner.yaml`
Details
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: nfs-provisioner
spec:
syncPolicy:
syncOptions:
- CreateNamespace=true
project: default
source:
repoURL: https://kubernetes-sigs.github.io/nfs-subdir-external-provisioner
chart: nfs-subdir-external-provisioner
targetRevision: 4.0.18
helm:
releaseName: nfs-provisioner
values: |
image:
repository: m.daocloud.io/registry.k8s.io/sig-storage/nfs-subdir-external-provisioner
pullPolicy: IfNotPresent
nfs:
server: nfs.services.test
path: /
mountOptions:
- vers=4
- minorversion=0
- rsize=1048576
- wsize=1048576
- hard
- timeo=600
- retrans=2
- noresvport
volumeName: nfs-subdir-external-provisioner-nas
reclaimPolicy: Retain
storageClass:
create: true
defaultClass: true
name: nfs-external-nas
destination:
server: https://kubernetes.default.svc
namespace: storage
3.apply to k8s
Details
kubectl -n argocd apply -f nfs-provisioner.yaml
4.sync by argocd
Details
argocd app sync argocd/nfs-provisioner
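To make sure dynamic provisioning actually works, you can create a throwaway PVC against the new storage class (a verification sketch; delete it afterwards):
kubectl apply -f - << EOF
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: nfs-test-pvc
spec:
  accessModes:
    - ReadWriteMany
  storageClassName: nfs-external-nas
  resources:
    requests:
      storage: 1Mi
EOF
kubectl get pvc nfs-test-pvc
# the claim should reach Bound once the provisioner creates a PV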
Preliminary
1. Docker has been installed, if not check 🔗link
Using Proxy
you can run an additional daocloud image to accelerate your pulling, check Daocloud Proxy
1.init server
Details
echo -e "nfs\nnfsd" > /etc/modules-load.d/nfs4.conf
modprobe nfs && modprobe nfsd
mkdir -p $(pwd)/data/nfs/data
echo '/data *(rw,fsid=0,no_subtree_check,insecure,no_root_squash)' > $(pwd)/data/nfs/exports
podman run \
--name nfs4 \
--rm \
--privileged \
-p 2049:2049 \
-v $(pwd)/data/nfs/data:/data \
-v $(pwd)/data/nfs/exports:/etc/exports:ro \
-d docker.io/erichough/nfs-server:2.2.1
Preliminary
1. centos yum repo source has been updated, if not check 🔗link
1.install nfs util
sudo apt update -y
sudo apt-get install nfs-common
dnf update -y
dnf install -y nfs-utils rpcbind
sudo apt update -y
sudo apt-get install nfs-common
2. create share folder
Details
mkdir /data && chmod 755 /data
3.edit `/etc/exports`
Details
/data *(rw,sync,insecure,no_root_squash,no_subtree_check)
4.start nfs server
Details
systemctl enable rpcbind
systemctl enable nfs-server
systemctl start rpcbind
systemctl start nfs-server
5.test load on localhost
Details
showmount -e localhost
6.test load on other ip
Details
showmount -e 192.168.aa.bb
7.mount nfs disk
Details
mkdir -p $(pwd)/mnt/nfs
sudo mount -v 192.168.aa.bb:/data $(pwd)/mnt/nfs -o proto=tcp -o nolock
8.set nfs auto mount
Details
echo "192.168.aa.bb:/data /data nfs rw,auto,nofail,noatime,nolock,intr,tcp,actimeo=1800 0 0" >> /etc/fstab
df -h
Notes
[Optional] create new partition
fdisk /dev/vdb
# n
# p
# w
parted
# select /dev/vdb
# mklabel gpt
# mkpart primary 0 -1
# Cancel
# mkpart primary 0% 100%
# print
[Optional] Format disk
mkfs.xfs /dev/vdb1 -f
[Optional] mount disk to folder
mount /dev/vdb1 /data
[Optional] mount when restart
#vim `/etc/fstab`
/dev/vdb1 /data xfs defaults 0 0
FAQ
Install Rook Ceph
Install Redis
Installation
Preliminary
1. Kubernetes has been installed, if not check 🔗link
2. Helm has been installed, if not check 🔗link
1.get helm repo
Details
helm repo add ay-helm-mirror https://aaronyang0628.github.io/helm-chart-mirror/charts
helm repo update
2.install chart
Details
helm install ay-helm-mirror/kube-prometheus-stack --generate-name
Using Proxy
for more information, you can check 🔗https://artifacthub.io/packages/helm/prometheus-community/prometheus
Preliminary
1. Kubernetes has been installed, if not check 🔗link
2. Helm has been installed, if not check 🔗link
3. ArgoCD has been installed, if not check 🔗link
1.prepare redis secret
Details
kubectl get namespaces storage > /dev/null 2>&1 || kubectl create namespace storage
kubectl -n storage create secret generic redis-credentials \
--from-literal=redis-password=$(tr -dc A-Za-z0-9 </dev/urandom | head -c 16)
2.prepare `deploy-redis.yaml`
Details
kubectl -n argocd apply -f - << EOF
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: redis
spec:
syncPolicy:
syncOptions:
- CreateNamespace=true
project: default
source:
repoURL: https://charts.bitnami.com/bitnami
chart: redis
targetRevision: 18.16.0
helm:
releaseName: redis
values: |
architecture: replication
auth:
enabled: true
sentinel: true
existingSecret: redis-credentials
master:
count: 1
disableCommands:
- FLUSHDB
- FLUSHALL
persistence:
enabled: true
storageClass: nfs-external
size: 8Gi
replica:
replicaCount: 3
disableCommands:
- FLUSHDB
- FLUSHALL
persistence:
enabled: true
storageClass: nfs-external
size: 8Gi
image:
registry: m.daocloud.io/docker.io
pullPolicy: IfNotPresent
sentinel:
enabled: false
persistence:
enabled: false
image:
registry: m.daocloud.io/docker.io
pullPolicy: IfNotPresent
metrics:
enabled: false
image:
registry: m.daocloud.io/docker.io
pullPolicy: IfNotPresent
volumePermissions:
enabled: false
image:
registry: m.daocloud.io/docker.io
pullPolicy: IfNotPresent
sysctl:
enabled: false
image:
registry: m.daocloud.io/docker.io
pullPolicy: IfNotPresent
extraDeploy:
- |
apiVersion: apps/v1
kind: Deployment
metadata:
name: redis-tool
namespace: csst
labels:
app.kubernetes.io/name: redis-tool
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: redis-tool
template:
metadata:
labels:
app.kubernetes.io/name: redis-tool
spec:
containers:
- name: redis-tool
image: m.daocloud.io/docker.io/bitnami/redis:7.2.4-debian-12-r8
imagePullPolicy: IfNotPresent
env:
- name: REDISCLI_AUTH
valueFrom:
secretKeyRef:
key: redis-password
name: redis-credentials
- name: TZ
value: Asia/Shanghai
command:
- tail
- -f
- /etc/hosts
destination:
server: https://kubernetes.default.svc
namespace: storage
EOF
3.sync by argocd
Details
argocd app sync argocd/redis
4.decode password
Details
kubectl -n storage get secret redis-credentials -o jsonpath='{.data.redis-password}' | base64 -d
Preliminary
1. Docker|Podman|Buildah has been installed, if not check 🔗link
Using Proxy
you can run an additional daocloud image to accelerate your pulling, check Daocloud Proxy
1.init server
Details
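The standalone setup is not spelled out here; a minimal sketch, assuming a local instance with the official image is enough:
mkdir -p $(pwd)/redis/data
podman run --rm \
    --name redis-server \
    -p 6379:6379 \
    -v $(pwd)/redis/data:/data \
    -d docker.io/library/redis:7.2 redis-server --appendonly yes
podman exec -it redis-server redis-cli ping
# should answer PONG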
Preliminary
1. Kubernetes has been installed, if not check 🔗link
2. Helm has been installed, if not check 🔗link
3. ArgoCD has been installed, if not check 🔗link
4. Argo Workflow has been installed, if not check 🔗link
1.prepare `argocd-login-credentials`
Details
kubectl get namespaces database > /dev/null 2>&1 || kubectl create namespace database
2.apply rolebinding to k8s
Details
kubectl apply -f - <<EOF
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: application-administrator
rules:
- apiGroups:
- argoproj.io
resources:
- applications
verbs:
- '*'
- apiGroups:
- apps
resources:
- deployments
verbs:
- '*'
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: application-administration
namespace: argocd
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: application-administrator
subjects:
- kind: ServiceAccount
name: argo-workflow
namespace: business-workflows
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: application-administration
namespace: application
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: application-administrator
subjects:
- kind: ServiceAccount
name: argo-workflow
namespace: business-workflows
EOF
4.prepare `deploy-xxxx-flow.yaml`
Details
6.submit to argo workflow client
Details
argo -n business-workflows submit deploy-xxxx-flow.yaml
7.decode password
Details
kubectl -n application get secret xxxx-credentials -o jsonpath='{.data.xxx-password}' | base64 -d
FAQ
tests
kubectl -n storage exec -it deployment/redis-tool -- \
    redis-cli -c -h redis-master.storage ping
kubectl -n storage exec -it deployment/redis-tool -- \
    redis-cli -c -h redis-master.storage set mykey somevalue
kubectl -n storage exec -it deployment/redis-tool -- \
    redis-cli -c -h redis-master.storage get mykey
kubectl -n storage exec -it deployment/redis-tool -- \
    redis-cli -c -h redis-master.storage del mykey
kubectl -n storage exec -it deployment/redis-tool -- \
    redis-cli -c -h redis-master.storage get mykey
Streaming
Subsections of Streaming
Install Flink Operator
Installation
Preliminary
1. Kubernetes has been installed, if not check 🔗link
2. Helm has been installed, if not check 🔗link
3. Cert-manager has been installed, if not check 🔗link
1.get helm repo
Details
helm repo add flink-operator-repo https://downloads.apache.org/flink/flink-kubernetes-operator-1.11.0/
helm repo update
latest version: 🔗https://flink.apache.org/downloads/#apache-flink-kubernetes-operator
2.install chart
Details
helm install --create-namespace -n flink flink-kubernetes-operator flink-operator-repo/flink-kubernetes-operator \
    --set image.repository=m.lab.zverse.space/ghcr.io/apache/flink-kubernetes-operator \
    --set image.tag=1.11.0 \
    --set webhook.create=false
Reference
for more information, you can check 🔗https://nightlies.apache.org/flink/flink-kubernetes-operator-docs-main/zh/docs/try-flink-kubernetes-operator/quick-start/
Preliminary
1. Kubernetes has been installed, if not check 🔗link
2. ArgoCD has been installed, if not check 🔗link
3. Cert-manager has been installed on argoCD and a cluster issuer named self-signed-ca-issuer exists, if not check 🔗link
4. Ingress has been installed on argoCD, if not check 🔗link
2.prepare `flink-operator.yaml`
Details
kubectl -n argocd apply -f - << EOF
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: flink-operator
spec:
syncPolicy:
syncOptions:
- CreateNamespace=true
project: default
source:
repoURL: https://downloads.apache.org/flink/flink-kubernetes-operator-1.11.0
chart: flink-kubernetes-operator
targetRevision: 1.11.0
helm:
releaseName: flink-operator
values: |
image:
repository: m.daocloud.io/ghcr.io/apache/flink-kubernetes-operator
pullPolicy: IfNotPresent
tag: "1.11.0"
version: v3
destination:
server: https://kubernetes.default.svc
namespace: flink
EOF
3.sync by argocd
Details
argocd app sync argocd/flink-operator
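After the sync completes the operator pod should be Running in the flink namespace, and you can exercise it with the upstream example job. The FlinkDeployment below is a sketch based on the official quick start; the mirrored image path, Flink version and example jar path are assumptions:
kubectl -n flink get pods
kubectl -n flink apply -f - << EOF
apiVersion: flink.apache.org/v1beta1
kind: FlinkDeployment
metadata:
  name: basic-example
spec:
  image: m.daocloud.io/docker.io/library/flink:1.20
  flinkVersion: v1_20
  flinkConfiguration:
    taskmanager.numberOfTaskSlots: "2"
  serviceAccount: flink
  jobManager:
    resource:
      memory: "2048m"
      cpu: 1
  taskManager:
    resource:
      memory: "2048m"
      cpu: 1
  job:
    jarURI: local:///opt/flink/examples/streaming/StateMachineExample.jar
    parallelism: 2
    upgradeMode: stateless
EOF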


