Subsections of Database
Install Clickhouse
Installation
Preliminary
1. Kubernetes has installed, if not check πlink
2. argoCD has installed, if not check πlink
3. cert-manager has installed on argocd and the clusterissuer has a service named `self-signed-ca-issuer`, if not check πlink
1.prepare admin credentials secret
kubectl get namespaces database > /dev/null 2>&1 || kubectl create namespace database
kubectl -n database create secret generic clickhouse-admin-credentials \
--from-literal=password=$(tr -dc A-Za-z0-9 </dev/urandom | head -c 16)
2.prepare `deploy-clickhouse.yaml`
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: clickhouse
spec:
syncPolicy:
syncOptions:
- CreateNamespace=true
project: default
source:
repoURL: https://charts.bitnami.com/bitnami
chart: clickhouse
targetRevision: 4.5.1
helm:
releaseName: clickhouse
values: |
serviceAccount:
name: clickhouse
image:
registry: m.daocloud.io/docker.io
pullPolicy: IfNotPresent
volumePermissions:
enabled: false
image:
registry: m.daocloud.io/docker.io
pullPolicy: IfNotPresent
zookeeper:
enabled: true
image:
registry: m.daocloud.io/docker.io
pullPolicy: IfNotPresent
replicaCount: 3
persistence:
enabled: true
storageClass: nfs-external
size: 8Gi
volumePermissions:
enabled: false
image:
registry: m.daocloud.io/docker.io
pullPolicy: IfNotPresent
shards: 2
replicaCount: 3
ingress:
enabled: true
annotations:
cert-manager.io/cluster-issuer: self-signed-ca-issuer
nginx.ingress.kubernetes.io/rewrite-target: /$1
hostname: clickhouse.dev.geekcity.tech
ingressClassName: nginx
path: /?(.*)
tls: true
persistence:
enabled: false
resources:
requests:
cpu: 2
memory: 512Mi
limits:
cpu: 3
memory: 1024Mi
auth:
username: admin
existingSecret: clickhouse-admin-credentials
existingSecretKey: password
metrics:
enabled: true
image:
registry: m.daocloud.io/docker.io
pullPolicy: IfNotPresent
serviceMonitor:
enabled: true
namespace: monitor
jobLabel: clickhouse
selector:
app.kubernetes.io/name: clickhouse
app.kubernetes.io/instance: clickhouse
labels:
release: prometheus-stack
extraDeploy:
- |
apiVersion: apps/v1
kind: Deployment
metadata:
name: clickhouse-tool
namespace: database
labels:
app.kubernetes.io/name: clickhouse-tool
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: clickhouse-tool
template:
metadata:
labels:
app.kubernetes.io/name: clickhouse-tool
spec:
containers:
- name: clickhouse-tool
image: m.daocloud.io/docker.io/clickhouse/clickhouse-server:23.11.5.29-alpine
imagePullPolicy: IfNotPresent
env:
- name: CLICKHOUSE_USER
value: admin
- name: CLICKHOUSE_PASSWORD
valueFrom:
secretKeyRef:
key: password
name: clickhouse-admin-credentials
- name: CLICKHOUSE_HOST
value: csst-clickhouse.csst
- name: CLICKHOUSE_PORT
value: "9000"
- name: TZ
value: Asia/Shanghai
command:
- tail
args:
- -f
- /etc/hosts
destination:
server: https://kubernetes.default.svc
namespace: database
3.deploy clickhouse
kubectl -n argocd apply -f deploy-clickhouse.yaml
4.sync by argocd
argocd app sync argocd/clickhouse
5.prepare `clickhouse-interface.yaml`
apiVersion: v1
kind: Service
metadata:
labels:
app.kubernetes.io/component: clickhouse
app.kubernetes.io/instance: clickhouse
name: clickhouse-interface
spec:
ports:
- name: http
port: 8123
protocol: TCP
targetPort: http
nodePort: 31567
- name: tcp
port: 9000
protocol: TCP
targetPort: tcp
nodePort: 32005
selector:
app.kubernetes.io/component: clickhouse
app.kubernetes.io/instance: clickhouse
app.kubernetes.io/name: clickhouse
type: NodePort
6.apply to k8s
kubectl -n database apply -f clickhouse-interface.yaml
7.extract clickhouse admin credentials
kubectl -n database get secret clickhouse-admin-credentials -o jsonpath='{.data.password}' | base64 -d
8.invoke http api
add `$K8S_MASTER_IP clickhouse.dev.geekcity.tech` to **/etc/hosts**
CK_PASS=$(kubectl -n database get secret clickhouse-admin-credentials -o jsonpath='{.data.password}' | base64 -d)
echo 'SELECT version()' | curl -k "https://admin:${CK_PASS}@clickhouse.dev.geekcity.tech:32443/" --data-binary @-
Preliminary
1. Docker has installed, if not check πlink
You can run an additional daocloud image to accelerate your pulling, check Daocloud Proxy
1.init server
mkdir -p clickhouse/{data,logs}
podman run --rm \
--ulimit nofile=262144:262144 \
--name clickhouse-server \
-p 18123:8123 \
-p 19000:9000 \
-v $(pwd)/clickhouse/data:/var/lib/clickhouse \
-v $(pwd)/clickhouse/logs:/var/log/clickhouse-server \
-e CLICKHOUSE_DB=my_database \
-e CLICKHOUSE_DEFAULT_ACCESS_MANAGEMENT=1 \
-e CLICKHOUSE_USER=ayayay \
-e CLICKHOUSE_PASSWORD=123456 \
-d m.daocloud.io/docker.io/clickhouse/clickhouse-server:23.11.5.29-alpine
2.check dashboard
And then you can visit πhttp://localhost:18123
3.use cli api
And then you can visit πhttp://localhost:19000
podman run --rm \
--entrypoint clickhouse-client \
-it m.daocloud.io/docker.io/clickhouse/clickhouse-server:23.11.5.29-alpine \
--host host.containers.internal \
--port 19000 \
--user ayayay \
--password 123456 \
--query "select version()"
4.use visual client
podman run --rm -p 8080:80 -d m.daocloud.io/docker.io/spoonest/clickhouse-tabix-web-client:stable
Preliminary
1. Kubernetes has installed, if not check πlink
2. ArgoCD has installed, if not check πlink
3. Argo Workflow has installed, if not check πlink
1.prepare `argocd-login-credentials`
kubectl get namespaces database > /dev/null 2>&1 || kubectl create namespace database
kubectl -n database create secret generic mariadb-credentials \
--from-literal=mariadb-root-password=$(tr -dc A-Za-z0-9 </dev/urandom | head -c 16) \
--from-literal=mariadb-replication-password=$(tr -dc A-Za-z0-9 </dev/urandom | head -c 16) \
--from-literal=mariadb-password=$(tr -dc A-Za-z0-9 </dev/urandom | head -c 16)
2.apply rolebinding to k8s
kubectl apply -f - <<EOF
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: application-administrator
rules:
- apiGroups:
- argoproj.io
resources:
- applications
verbs:
- '*'
- apiGroups:
- apps
resources:
- deployments
verbs:
- '*'
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: application-administration
namespace: argocd
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: application-administrator
subjects:
- kind: ServiceAccount
name: argo-workflow
namespace: business-workflows
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: application-administration
namespace: application
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: application-administrator
subjects:
- kind: ServiceAccount
name: argo-workflow
namespace: business-workflows
EOF
4.prepare clickhouse admin credentials secret
kubectl get namespace application > /dev/null 2>&1 || kubectl create namespace application
kubectl -n application create secret generic clickhouse-admin-credentials \
--from-literal=password=$(tr -dc A-Za-z0-9 </dev/urandom | head -c 16)
5.prepare deploy-clickhouse-flow.yaml
apiVersion: argoproj.io/v1alpha1
kind: Workflow
metadata:
generateName: deploy-argocd-app-ck-
spec:
entrypoint: entry
artifactRepositoryRef:
configmap: artifact-repositories
key: default-artifact-repository
serviceAccountName: argo-workflow
templates:
- name: entry
inputs:
parameters:
- name: argocd-server
value: argo-cd-argocd-server.argocd:443
- name: insecure-option
value: --insecure
dag:
tasks:
- name: apply
template: apply
- name: prepare-argocd-binary
template: prepare-argocd-binary
dependencies:
- apply
- name: sync
dependencies:
- prepare-argocd-binary
template: sync
arguments:
artifacts:
- name: argocd-binary
from: "{{tasks.prepare-argocd-binary.outputs.artifacts.argocd-binary}}"
parameters:
- name: argocd-server
value: "{{inputs.parameters.argocd-server}}"
- name: insecure-option
value: "{{inputs.parameters.insecure-option}}"
- name: wait
dependencies:
- sync
template: wait
arguments:
artifacts:
- name: argocd-binary
from: "{{tasks.prepare-argocd-binary.outputs.artifacts.argocd-binary}}"
parameters:
- name: argocd-server
value: "{{inputs.parameters.argocd-server}}"
- name: insecure-option
value: "{{inputs.parameters.insecure-option}}"
- name: apply
resource:
action: apply
manifest: |
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: app-clickhouse
namespace: argocd
spec:
syncPolicy:
syncOptions:
- CreateNamespace=true
project: default
source:
repoURL: https://charts.bitnami.com/bitnami
chart: clickhouse
targetRevision: 4.5.3
helm:
releaseName: app-clickhouse
values: |
image:
registry: docker.io
repository: bitnami/clickhouse
tag: 23.12.3-debian-11-r0
pullPolicy: IfNotPresent
service:
type: ClusterIP
volumePermissions:
enabled: false
image:
registry: m.daocloud.io/docker.io
pullPolicy: IfNotPresent
ingress:
enabled: true
ingressClassName: nginx
annotations:
cert-manager.io/cluster-issuer: self-signed-ca-issuer
nginx.ingress.kubernetes.io/rewrite-target: /$1
path: /?(.*)
hostname: clickhouse.dev.geekcity.tech
tls: true
shards: 2
replicaCount: 3
persistence:
enabled: false
auth:
username: admin
existingSecret: clickhouse-admin-credentials
existingSecretKey: password
zookeeper:
enabled: true
image:
registry: m.daocloud.io/docker.io
repository: bitnami/zookeeper
tag: 3.8.3-debian-11-r8
pullPolicy: IfNotPresent
replicaCount: 3
persistence:
enabled: false
volumePermissions:
enabled: false
image:
registry: m.daocloud.io/docker.io
pullPolicy: IfNotPresent
destination:
server: https://kubernetes.default.svc
namespace: application
- name: prepare-argocd-binary
inputs:
artifacts:
- name: argocd-binary
path: /tmp/argocd
mode: 755
http:
url: https://files.m.daocloud.io/github.com/argoproj/argo-cd/releases/download/v2.9.3/argocd-linux-amd64
outputs:
artifacts:
- name: argocd-binary
path: "{{inputs.artifacts.argocd-binary.path}}"
container:
image: m.daocloud.io/docker.io/library/fedora:39
command:
- sh
- -c
args:
- |
ls -l {{inputs.artifacts.argocd-binary.path}}
- name: sync
inputs:
artifacts:
- name: argocd-binary
path: /usr/local/bin/argocd
parameters:
- name: argocd-server
- name: insecure-option
value: ""
container:
image: m.daocloud.io/docker.io/library/fedora:39
env:
- name: ARGOCD_USERNAME
valueFrom:
secretKeyRef:
name: argocd-login-credentials
key: username
- name: ARGOCD_PASSWORD
valueFrom:
secretKeyRef:
name: argocd-login-credentials
key: password
- name: WITH_PRUNE_OPTION
value: --prune
command:
- sh
- -c
args:
- |
set -e
export ARGOCD_SERVER={{inputs.parameters.argocd-server}}
export INSECURE_OPTION={{inputs.parameters.insecure-option}}
export ARGOCD_USERNAME=${ARGOCD_USERNAME:-admin}
argocd login ${INSECURE_OPTION} --username ${ARGOCD_USERNAME} --password ${ARGOCD_PASSWORD} ${ARGOCD_SERVER}
argocd app sync argocd/app-clickhouse ${WITH_PRUNE_OPTION} --timeout 300
- name: wait
inputs:
artifacts:
- name: argocd-binary
path: /usr/local/bin/argocd
parameters:
- name: argocd-server
- name: insecure-option
value: ""
container:
image: m.daocloud.io/docker.io/library/fedora:39
env:
- name: ARGOCD_USERNAME
valueFrom:
secretKeyRef:
name: argocd-login-credentials
key: username
- name: ARGOCD_PASSWORD
valueFrom:
secretKeyRef:
name: argocd-login-credentials
key: password
command:
- sh
- -c
args:
- |
set -e
export ARGOCD_SERVER={{inputs.parameters.argocd-server}}
export INSECURE_OPTION={{inputs.parameters.insecure-option}}
export ARGOCD_USERNAME=${ARGOCD_USERNAME:-admin}
argocd login ${INSECURE_OPTION} --username ${ARGOCD_USERNAME} --password ${ARGOCD_PASSWORD} ${ARGOCD_SERVER}
argocd app wait argocd/app-clickhouse
6.submit to argo workflow client
argo -n business-workflows submit deploy-clickhouse-flow.yaml
7.extract clickhouse admin credentials
kubectl -n application get secret clickhouse-admin-credentials -o jsonpath='{.data.password}' | base64 -d
8.invoke http api
add `$K8S_MASTER_IP clickhouse.dev.geekcity.tech` to **/etc/hosts**
CK_PASSWORD=$(kubectl -n application get secret clickhouse-admin-credentials -o jsonpath='{.data.password}' | base64 -d) && echo 'SELECT version()' | curl -k "https://admin:${CK_PASSWORD}@clickhouse.dev.geekcity.tech/" --data-binary @-
9.create external interface
kubectl -n application apply -f - <<EOF
apiVersion: v1
kind: Service
metadata:
labels:
app.kubernetes.io/component: clickhouse
app.kubernetes.io/instance: app-clickhouse
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/name: clickhouse
app.kubernetes.io/version: 23.12.2
argocd.argoproj.io/instance: app-clickhouse
helm.sh/chart: clickhouse-4.5.3
name: app-clickhouse-service-external
spec:
ports:
- name: tcp
port: 9000
protocol: TCP
targetPort: tcp
nodePort: 30900
selector:
app.kubernetes.io/component: clickhouse
app.kubernetes.io/instance: app-clickhouse
app.kubernetes.io/name: clickhouse
type: NodePort
EOF
FAQ
Install ElasticSearch
Preliminary
- Kubernetes has installed, if not check πlink
- argoCD has installed, if not check πlink
- ingress has installed on argoCD, if not check πlink
- cert-manager has installed on argocd and the clusterissuer has a named
self-signed-ca-issuer
service, if not check πlink
Steps
1. prepare elastic-search.yaml
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: elastic-search
spec:
syncPolicy:
syncOptions:
- CreateNamespace=true
project: default
source:
repoURL: https://charts.bitnami.com/bitnami
chart: elasticsearch
targetRevision: 19.11.3
helm:
releaseName: elastic-search
values: |
global:
kibanaEnabled: true
clusterName: elastic
image:
registry: m.zjvis.net/docker.io
pullPolicy: IfNotPresent
security:
enabled: false
service:
type: ClusterIP
ingress:
enabled: true
annotations:
cert-manager.io/cluster-issuer: self-signed-ca-issuer
nginx.ingress.kubernetes.io/rewrite-target: /$1
hostname: elastic-search.dev.tech
ingressClassName: nginx
path: /?(.*)
tls: true
master:
masterOnly: false
replicaCount: 1
persistence:
enabled: false
resources:
requests:
cpu: 2
memory: 1024Mi
limits:
cpu: 4
memory: 4096Mi
heapSize: 2g
data:
replicaCount: 0
persistence:
enabled: false
coordinating:
replicaCount: 0
ingest:
enabled: true
replicaCount: 0
service:
enabled: false
type: ClusterIP
ingress:
enabled: false
metrics:
enabled: false
image:
registry: m.zjvis.net/docker.io
pullPolicy: IfNotPresent
volumePermissions:
enabled: false
image:
registry: m.zjvis.net/docker.io
pullPolicy: IfNotPresent
sysctlImage:
enabled: true
registry: m.zjvis.net/docker.io
pullPolicy: IfNotPresent
kibana:
elasticsearch:
hosts:
- '{{ include "elasticsearch.service.name" . }}'
port: '{{ include "elasticsearch.service.ports.restAPI" . }}'
esJavaOpts: "-Xmx2g -Xms2g"
destination:
server: https://kubernetes.default.svc
namespace: application
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: elastic-search
spec:
syncPolicy:
syncOptions:
- CreateNamespace=true
project: default
source:
repoURL: https://charts.bitnami.com/bitnami
chart: elasticsearch
targetRevision: 19.11.3
helm:
releaseName: elastic-search
values: |
global:
kibanaEnabled: true
clusterName: elastic
image:
registry: m.zjvis.net/docker.io
pullPolicy: IfNotPresent
security:
enabled: false
service:
type: ClusterIP
ingress:
enabled: true
annotations:
cert-manager.io/cluster-issuer: self-signed-ca-issuer
nginx.ingress.kubernetes.io/rewrite-target: /$1
hostname: elastic-search.dev.tech
ingressClassName: nginx
path: /?(.*)
tls: true
master:
masterOnly: false
replicaCount: 1
persistence:
enabled: false
resources:
requests:
cpu: 2
memory: 1024Mi
limits:
cpu: 4
memory: 4096Mi
heapSize: 2g
data:
replicaCount: 0
persistence:
enabled: false
coordinating:
replicaCount: 0
ingest:
enabled: true
replicaCount: 0
service:
enabled: false
type: ClusterIP
ingress:
enabled: false
metrics:
enabled: false
image:
registry: m.zjvis.net/docker.io
pullPolicy: IfNotPresent
volumePermissions:
enabled: false
image:
registry: m.zjvis.net/docker.io
pullPolicy: IfNotPresent
sysctlImage:
enabled: true
registry: m.zjvis.net/docker.io
pullPolicy: IfNotPresent
kibana:
elasticsearch:
hosts:
- '{{ include "elasticsearch.service.name" . }}'
port: '{{ include "elasticsearch.service.ports.restAPI" . }}'
esJavaOpts: "-Xmx2g -Xms2g"
destination:
server: https://kubernetes.default.svc
namespace: application
3. apply to k8s
kubectl -n argocd apply -f elastic-search.yaml
4. sync by argocd
argocd app sync argocd/elastic-search
[Optional] Test REST API call
add
$K8S_MASTER_IP elastic-search.dev.tech
to/etc/hosts
curl -k "https://elastic-search.dev.tech:32443/?pretty"
[Optional] Add Single Document
curl -k -H "Content-Type: application/json" \
-X POST "https://elastic-search.dev.tech:32443/books/_doc?pretty" \
-d '{"name": "Snow Crash", "author": "Neal Stephenson", "release_date": "1992-06-01", "page_count": 470}'
Install Kafka
Installation
Preliminary
1. Kubernetes has installed, if not check πlink
2. Helm binary has installed, if not check πlink
1.get helm repo
helm repo add bitnami oci://registry-1.docker.io/bitnamicharts/kafka
helm repo update
2.install chart
# Install (or upgrade) the Bitnami Kafka chart as release "kafka" in the
# "database" namespace: KRaft mode (no ZooKeeper), single controller and
# broker, and persistence disabled for a throwaway test install.
# Fix: the chart value is `persistence`, not `persistance` — the original
# misspelling made the flag a no-op, so PVCs were still created.
helm upgrade --create-namespace -n database kafka --install bitnami/kafka \
  --set global.imageRegistry=m.daocloud.io/docker.io \
  --set zookeeper.enabled=false \
  --set controller.replicaCount=1 \
  --set broker.replicaCount=1 \
  --set persistence.enabled=false \
  --version 28.0.3
# Same install command as above (duplicated tab/variant in the source docs).
# Fix: `persistance.enabled` -> `persistence.enabled`; the misspelled key is
# silently ignored by Helm, leaving persistence enabled.
helm upgrade --create-namespace -n database kafka --install bitnami/kafka \
  --set global.imageRegistry=m.daocloud.io/docker.io \
  --set zookeeper.enabled=false \
  --set controller.replicaCount=1 \
  --set broker.replicaCount=1 \
  --set persistence.enabled=false \
  --version 28.0.3
kubectl -n database \
create secret generic client-properties \
--from-literal=client.properties="$(printf "security.protocol=SASL_PLAINTEXT\nsasl.mechanism=SCRAM-SHA-256\nsasl.jaas.config=org.apache.kafka.common.security.scram.ScramLoginModule required username=\"user1\" password=\"$(kubectl get secret kafka-user-passwords --namespace database -o jsonpath='{.data.client-passwords}' | base64 -d | cut -d , -f 1)\";\n")"
kubectl -n database apply -f - << EOF
apiVersion: apps/v1
kind: Deployment
metadata:
name: kafka-client-tools
labels:
app: kafka-client-tools
spec:
replicas: 1
selector:
matchLabels:
app: kafka-client-tools
template:
metadata:
labels:
app: kafka-client-tools
spec:
volumes:
- name: client-properties
secret:
secretName: client-properties
containers:
- name: kafka-client-tools
image: m.daocloud.io/docker.io/bitnami/kafka:3.6.2
volumeMounts:
- name: client-properties
mountPath: /bitnami/custom/client.properties
subPath: client.properties
readOnly: true
env:
- name: BOOTSTRAP_SERVER
value: kafka.database.svc.cluster.local:9092
- name: CLIENT_CONFIG_FILE
value: /bitnami/custom/client.properties
command:
- tail
- -f
- /etc/hosts
imagePullPolicy: IfNotPresent
EOF
3.validate function
- list topicskubectl -n database exec -it deployment/kafka-client-tools -- bash -c \
'kafka-topics.sh --bootstrap-server $BOOTSTRAP_SERVER --command-config $CLIENT_CONFIG_FILE --list'
kubectl -n database exec -it deployment/kafka-client-tools -- bash -c \
'kafka-topics.sh --bootstrap-server $BOOTSTRAP_SERVER --command-config $CLIENT_CONFIG_FILE --create --if-not-exists --topic test-topic'
kubectl -n database exec -it deployment/kafka-client-tools -- bash -c \
'kafka-topics.sh --bootstrap-server $BOOTSTRAP_SERVER --command-config $CLIENT_CONFIG_FILE --describe --topic test-topic'
kubectl -n database exec -it deployment/kafka-client-tools -- bash -c \
'for message in $(seq 0 10); do echo $message | kafka-console-producer.sh --bootstrap-server $BOOTSTRAP_SERVER --producer.config $CLIENT_CONFIG_FILE --topic test-topic; done'
kubectl -n database exec -it deployment/kafka-client-tools -- bash -c \
'kafka-console-consumer.sh --bootstrap-server $BOOTSTRAP_SERVER --consumer.config $CLIENT_CONFIG_FILE --topic test-topic --from-beginning'
Preliminary
1. Kubernetes has installed, if not check πlink
2. ArgoCD has installed, if not check πlink
3. Helm binary has installed, if not check πlink
1.prepare `deploy-kafka.yaml`
kubectl -n argocd apply -f - << EOF
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: kafka
spec:
syncPolicy:
syncOptions:
- CreateNamespace=true
project: default
source:
repoURL: https://charts.bitnami.com/bitnami
chart: kafka
targetRevision: 28.0.3
helm:
releaseName: kafka
values: |
image:
registry: m.daocloud.io/docker.io
controller:
replicaCount: 1
persistence:
enabled: false
logPersistence:
enabled: false
extraConfig: |
message.max.bytes=5242880
default.replication.factor=1
offsets.topic.replication.factor=1
transaction.state.log.replication.factor=1
broker:
replicaCount: 1
persistence:
enabled: false
logPersistence:
enabled: false
extraConfig: |
message.max.bytes=5242880
default.replication.factor=1
offsets.topic.replication.factor=1
transaction.state.log.replication.factor=1
externalAccess:
enabled: false
autoDiscovery:
enabled: false
image:
registry: m.daocloud.io/docker.io
volumePermissions:
enabled: false
image:
registry: m.daocloud.io/docker.io
metrics:
kafka:
enabled: false
image:
registry: m.daocloud.io/docker.io
jmx:
enabled: false
image:
registry: m.daocloud.io/docker.io
provisioning:
enabled: false
kraft:
enabled: true
zookeeper:
enabled: false
destination:
server: https://kubernetes.default.svc
namespace: database
EOF
kubectl -n argocd apply -f - << EOF
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: kafka
spec:
syncPolicy:
syncOptions:
- CreateNamespace=true
project: default
source:
repoURL: https://charts.bitnami.com/bitnami
chart: kafka
targetRevision: 28.0.3
helm:
releaseName: kafka
values: |
image:
registry: m.daocloud.io/docker.io
listeners:
client:
protocol: PLAINTEXT
interbroker:
protocol: PLAINTEXT
controller:
replicaCount: 0
persistence:
enabled: false
logPersistence:
enabled: false
extraConfig: |
message.max.bytes=5242880
default.replication.factor=1
offsets.topic.replication.factor=1
transaction.state.log.replication.factor=1
broker:
replicaCount: 1
minId: 0
persistence:
enabled: false
logPersistence:
enabled: false
extraConfig: |
message.max.bytes=5242880
default.replication.factor=1
offsets.topic.replication.factor=1
transaction.state.log.replication.factor=1
externalAccess:
enabled: false
autoDiscovery:
enabled: false
image:
registry: m.daocloud.io/docker.io
volumePermissions:
enabled: false
image:
registry: m.daocloud.io/docker.io
metrics:
kafka:
enabled: false
image:
registry: m.daocloud.io/docker.io
jmx:
enabled: false
image:
registry: m.daocloud.io/docker.io
provisioning:
enabled: false
kraft:
enabled: false
zookeeper:
enabled: true
image:
registry: m.daocloud.io/docker.io
replicaCount: 1
auth:
client:
enabled: false
quorum:
enabled: false
persistence:
enabled: false
volumePermissions:
enabled: false
image:
registry: m.daocloud.io/docker.io
metrics:
enabled: false
tls:
client:
enabled: false
quorum:
enabled: false
destination:
server: https://kubernetes.default.svc
namespace: database
EOF
2.sync by argocd
argocd app sync argocd/kafka
3.set up client tool
kubectl -n database \
create secret generic client-properties \
--from-literal=client.properties="$(printf "security.protocol=SASL_PLAINTEXT\nsasl.mechanism=SCRAM-SHA-256\nsasl.jaas.config=org.apache.kafka.common.security.scram.ScramLoginModule required username=\"user1\" password=\"$(kubectl get secret kafka-user-passwords --namespace database -o jsonpath='{.data.client-passwords}' | base64 -d | cut -d , -f 1)\";\n")"
kubectl -n database \
create secret generic client-properties \
--from-literal=client.properties="security.protocol=PLAINTEXT"
5.prepare `kafka-client-tools.yaml`
kubectl -n database apply -f - << EOF
apiVersion: apps/v1
kind: Deployment
metadata:
name: kafka-client-tools
labels:
app: kafka-client-tools
spec:
replicas: 1
selector:
matchLabels:
app: kafka-client-tools
template:
metadata:
labels:
app: kafka-client-tools
spec:
volumes:
- name: client-properties
secret:
secretName: client-properties
containers:
- name: kafka-client-tools
image: m.daocloud.io/docker.io/bitnami/kafka:3.6.2
volumeMounts:
- name: client-properties
mountPath: /bitnami/custom/client.properties
subPath: client.properties
readOnly: true
env:
- name: BOOTSTRAP_SERVER
value: kafka.database.svc.cluster.local:9092
- name: CLIENT_CONFIG_FILE
value: /bitnami/custom/client.properties
- name: ZOOKEEPER_CONNECT
value: kafka-zookeeper.database.svc.cluster.local:2181
command:
- tail
- -f
- /etc/hosts
imagePullPolicy: IfNotPresent
EOF
6.validate function
- list topicskubectl -n database exec -it deployment/kafka-client-tools -- bash -c \
'kafka-topics.sh --bootstrap-server $BOOTSTRAP_SERVER --command-config $CLIENT_CONFIG_FILE --list'
kubectl -n database exec -it deployment/kafka-client-tools -- bash -c \
'kafka-topics.sh --bootstrap-server $BOOTSTRAP_SERVER --command-config $CLIENT_CONFIG_FILE --create --if-not-exists --topic test-topic'
kubectl -n database exec -it deployment/kafka-client-tools -- bash -c \
'kafka-topics.sh --bootstrap-server $BOOTSTRAP_SERVER --command-config $CLIENT_CONFIG_FILE --describe --topic test-topic'
kubectl -n database exec -it deployment/kafka-client-tools -- bash -c \
'for message in $(seq 0 10); do echo $message | kafka-console-producer.sh --bootstrap-server $BOOTSTRAP_SERVER --producer.config $CLIENT_CONFIG_FILE --topic test-topic; done'
kubectl -n database exec -it deployment/kafka-client-tools -- bash -c \
'kafka-console-consumer.sh --bootstrap-server $BOOTSTRAP_SERVER --consumer.config $CLIENT_CONFIG_FILE --topic test-topic --from-beginning'
Preliminary
1. Docker has installed, if not check πlink
You can run an additional daocloud image to accelerate your pulling, check Daocloud Proxy
1.init server
mkdir -p kafka/data
chmod -R 777 kafka/data
# Run a single-node Kafka 3.6.2 (KRaft: combined controller+broker) container.
# Port 9092 serves in-network clients, 9094 serves the host via the EXTERNAL
# listener.
# Fix: the advertised PLAINTEXT listener must be a name clients can resolve;
# the container hostname is kafka-server (set below, and used by the quorum
# voters), but the original advertised `kafka:9092`, which does not exist here.
podman run --rm \
    --name kafka-server \
    --hostname kafka-server \
    -p 9092:9092 \
    -p 9094:9094 \
    -v $(pwd)/kafka/data:/bitnami/kafka/data \
    -e KAFKA_CFG_NODE_ID=0 \
    -e KAFKA_CFG_PROCESS_ROLES=controller,broker \
    -e KAFKA_CFG_CONTROLLER_QUORUM_VOTERS=0@kafka-server:9093 \
    -e KAFKA_CFG_LISTENERS=PLAINTEXT://:9092,CONTROLLER://:9093,EXTERNAL://:9094 \
    -e KAFKA_CFG_ADVERTISED_LISTENERS=PLAINTEXT://kafka-server:9092,EXTERNAL://host.containers.internal:9094 \
    -e KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP=CONTROLLER:PLAINTEXT,EXTERNAL:PLAINTEXT,PLAINTEXT:PLAINTEXT \
    -e KAFKA_CFG_CONTROLLER_LISTENER_NAMES=CONTROLLER \
    -d m.daocloud.io/docker.io/bitnami/kafka:3.6.2
2.list topic
BOOTSTRAP_SERVER=host.containers.internal:9094
podman run --rm \
-it m.daocloud.io/docker.io/bitnami/kafka:3.6.2 kafka-topics.sh \
--bootstrap-server $BOOTSTRAP_SERVER --list
2.create topic
BOOTSTRAP_SERVER=host.containers.internal:9094
# BOOTSTRAP_SERVER=10.200.60.64:9094
TOPIC=test-topic
podman run --rm \
-it m.daocloud.io/docker.io/bitnami/kafka:3.6.2 kafka-topics.sh \
--bootstrap-server $BOOTSTRAP_SERVER \
--create \
--if-not-exists \
--topic $TOPIC
2.consume record
BOOTSTRAP_SERVER=host.containers.internal:9094
# BOOTSTRAP_SERVER=10.200.60.64:9094
TOPIC=test-topic
podman run --rm \
-it m.daocloud.io/docker.io/bitnami/kafka:3.6.2 kafka-console-consumer.sh \
--bootstrap-server $BOOTSTRAP_SERVER \
--topic $TOPIC \
--from-beginning
FAQ
Install MariaDB
Installation
Preliminary
1. Kubernetes has installed, if not check πlink
2. argoCD has installed, if not check πlink
3. cert-manager has installed on argocd and the clusterissuer has a service named `self-signed-ca-issuer`, if not check πlink
1.prepare mariadb credentials secret
kubectl get namespaces database > /dev/null 2>&1 || kubectl create namespace database
kubectl -n database create secret generic mariadb-credentials \
--from-literal=mariadb-root-password=$(tr -dc A-Za-z0-9 </dev/urandom | head -c 16) \
--from-literal=mariadb-replication-password=$(tr -dc A-Za-z0-9 </dev/urandom | head -c 16) \
--from-literal=mariadb-password=$(tr -dc A-Za-z0-9 </dev/urandom | head -c 16)
2.prepare `deploy-mariadb.yaml`
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: mariadb
spec:
syncPolicy:
syncOptions:
- CreateNamespace=true
project: default
source:
repoURL: https://charts.bitnami.com/bitnami
chart: mariadb
targetRevision: 16.3.2
helm:
releaseName: mariadb
values: |
architecture: standalone
auth:
database: test-mariadb
username: aaron.yang
existingSecret: mariadb-credentials
primary:
extraFlags: "--character-set-server=utf8mb4 --collation-server=utf8mb4_bin"
persistence:
enabled: false
secondary:
replicaCount: 1
persistence:
enabled: false
image:
registry: m.daocloud.io/docker.io
pullPolicy: IfNotPresent
volumePermissions:
enabled: false
image:
registry: m.daocloud.io/docker.io
pullPolicy: IfNotPresent
metrics:
enabled: false
image:
registry: m.daocloud.io/docker.io
pullPolicy: IfNotPresent
destination:
server: https://kubernetes.default.svc
namespace: database
3.deploy mariadb
kubectl -n argocd apply -f deploy-mariadb.yaml
4.sync by argocd
argocd app sync argocd/mariadb
5.check mariadb
kubectl -n database get secret mariadb-credentials -o jsonpath='{.data.mariadb-root-password}' | base64 -d
Preliminary
1. Kubernetes has installed, if not check πlink
2. ArgoCD has installed, if not check πlink
3. Argo Workflow has installed, if not check πlink
1.prepare `argocd-login-credentials`
kubectl get namespaces database > /dev/null 2>&1 || kubectl create namespace database
kubectl -n database create secret generic mariadb-credentials \
--from-literal=mariadb-root-password=$(tr -dc A-Za-z0-9 </dev/urandom | head -c 16) \
--from-literal=mariadb-replication-password=$(tr -dc A-Za-z0-9 </dev/urandom | head -c 16) \
--from-literal=mariadb-password=$(tr -dc A-Za-z0-9 </dev/urandom | head -c 16)
2.apply rolebinding to k8s
kubectl -n argocd apply -f - <<EOF
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: application-administrator
rules:
- apiGroups:
- argoproj.io
resources:
- applications
verbs:
- '*'
- apiGroups:
- apps
resources:
- deployments
verbs:
- '*'
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: application-administration
namespace: argocd
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: application-administrator
subjects:
- kind: ServiceAccount
name: argo-workflow
namespace: business-workflows
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: application-administration
namespace: application
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: application-administrator
subjects:
- kind: ServiceAccount
name: argo-workflow
namespace: business-workflows
EOF
3.prepare mariadb credentials secret
kubectl -n application create secret generic mariadb-credentials \
--from-literal=mariadb-root-password=$(tr -dc A-Za-z0-9 </dev/urandom | head -c 16) \
--from-literal=mariadb-replication-password=$(tr -dc A-Za-z0-9 </dev/urandom | head -c 16) \
--from-literal=mariadb-password=$(tr -dc A-Za-z0-9 </dev/urandom | head -c 16)
4.prepare `deploy-mariadb-flow.yaml`
apiVersion: argoproj.io/v1alpha1
kind: Workflow
metadata:
generateName: deploy-argocd-app-mariadb-
spec:
entrypoint: entry
artifactRepositoryRef:
configmap: artifact-repositories
key: default-artifact-repository
serviceAccountName: argo-workflow
templates:
- name: entry
inputs:
parameters:
- name: argocd-server
value: argo-cd-argocd-server.argocd:443
- name: insecure-option
value: --insecure
dag:
tasks:
- name: apply
template: apply
- name: prepare-argocd-binary
template: prepare-argocd-binary
dependencies:
- apply
- name: sync
dependencies:
- prepare-argocd-binary
template: sync
arguments:
artifacts:
- name: argocd-binary
from: "{{tasks.prepare-argocd-binary.outputs.artifacts.argocd-binary}}"
parameters:
- name: argocd-server
value: "{{inputs.parameters.argocd-server}}"
- name: insecure-option
value: "{{inputs.parameters.insecure-option}}"
- name: wait
dependencies:
- sync
template: wait
arguments:
artifacts:
- name: argocd-binary
from: "{{tasks.prepare-argocd-binary.outputs.artifacts.argocd-binary}}"
parameters:
- name: argocd-server
value: "{{inputs.parameters.argocd-server}}"
- name: insecure-option
value: "{{inputs.parameters.insecure-option}}"
- name: init-db-tool
template: init-db-tool
dependencies:
- wait
- name: apply
resource:
action: apply
manifest: |
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: app-mariadb
namespace: argocd
spec:
syncPolicy:
syncOptions:
- CreateNamespace=true
project: default
source:
repoURL: https://charts.bitnami.com/bitnami
chart: mariadb
targetRevision: 16.5.0
helm:
releaseName: app-mariadb
values: |
architecture: standalone
auth:
database: geekcity
username: aaron.yang
existingSecret: mariadb-credentials
primary:
persistence:
enabled: false
secondary:
replicaCount: 1
persistence:
enabled: false
image:
registry: m.daocloud.io/docker.io
pullPolicy: IfNotPresent
volumePermissions:
enabled: false
image:
registry: m.daocloud.io/docker.io
pullPolicy: IfNotPresent
metrics:
enabled: false
image:
registry: m.daocloud.io/docker.io
pullPolicy: IfNotPresent
destination:
server: https://kubernetes.default.svc
namespace: application
- name: prepare-argocd-binary
inputs:
artifacts:
- name: argocd-binary
path: /tmp/argocd
mode: 755
http:
url: https://files.m.daocloud.io/github.com/argoproj/argo-cd/releases/download/v2.9.3/argocd-linux-amd64
outputs:
artifacts:
- name: argocd-binary
path: "{{inputs.artifacts.argocd-binary.path}}"
container:
image: m.daocloud.io/docker.io/library/fedora:39
command:
- sh
- -c
args:
- |
ls -l {{inputs.artifacts.argocd-binary.path}}
- name: sync
inputs:
artifacts:
- name: argocd-binary
path: /usr/local/bin/argocd
parameters:
- name: argocd-server
- name: insecure-option
value: ""
container:
image: m.daocloud.io/docker.io/library/fedora:39
env:
- name: ARGOCD_USERNAME
valueFrom:
secretKeyRef:
name: argocd-login-credentials
key: username
- name: ARGOCD_PASSWORD
valueFrom:
secretKeyRef:
name: argocd-login-credentials
key: password
- name: WITH_PRUNE_OPTION
value: --prune
command:
- sh
- -c
args:
- |
set -e
export ARGOCD_SERVER={{inputs.parameters.argocd-server}}
export INSECURE_OPTION={{inputs.parameters.insecure-option}}
export ARGOCD_USERNAME=${ARGOCD_USERNAME:-admin}
argocd login ${INSECURE_OPTION} --username ${ARGOCD_USERNAME} --password ${ARGOCD_PASSWORD} ${ARGOCD_SERVER}
argocd app sync argocd/app-mariadb ${WITH_PRUNE_OPTION} --timeout 300
- name: wait
inputs:
artifacts:
- name: argocd-binary
path: /usr/local/bin/argocd
parameters:
- name: argocd-server
- name: insecure-option
value: ""
container:
image: m.daocloud.io/docker.io/library/fedora:39
env:
- name: ARGOCD_USERNAME
valueFrom:
secretKeyRef:
name: argocd-login-credentials
key: username
- name: ARGOCD_PASSWORD
valueFrom:
secretKeyRef:
name: argocd-login-credentials
key: password
command:
- sh
- -c
args:
- |
set -e
export ARGOCD_SERVER={{inputs.parameters.argocd-server}}
export INSECURE_OPTION={{inputs.parameters.insecure-option}}
export ARGOCD_USERNAME=${ARGOCD_USERNAME:-admin}
argocd login ${INSECURE_OPTION} --username ${ARGOCD_USERNAME} --password ${ARGOCD_PASSWORD} ${ARGOCD_SERVER}
argocd app wait argocd/app-mariadb
- name: init-db-tool
resource:
action: apply
manifest: |
apiVersion: apps/v1
kind: Deployment
metadata:
name: app-mariadb-tool
namespace: application
labels:
app.kubernetes.io/name: mariadb-tool
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: mariadb-tool
template:
metadata:
labels:
app.kubernetes.io/name: mariadb-tool
spec:
containers:
- name: mariadb-tool
image: m.daocloud.io/docker.io/bitnami/mariadb:10.5.12-debian-10-r0
imagePullPolicy: IfNotPresent
env:
- name: MARIADB_ROOT_PASSWORD
valueFrom:
secretKeyRef:
key: mariadb-root-password
name: mariadb-credentials
- name: TZ
value: Asia/Shanghai
5.submit to argo workflow client
argo -n business-workflows submit deploy-mariadb-flow.yaml
6.decode password
kubectl -n application get secret mariadb-credentials -o jsonpath='{.data.mariadb-root-password}' | base64 -d
Preliminary
1. Docker has installed, if not check πlinkyou can run an additional daocloud image to accelerate your pulling, check Daocloud Proxy
1.init server
mkdir -p mariadb/data
podman run \
-p 3306:3306 \
-e MARIADB_ROOT_PASSWORD=mysql \
-v $(pwd)/mariadb/data:/var/lib/mysql \
-d m.daocloud.io/docker.io/library/mariadb:11.2.2-jammy \
--log-bin \
--binlog-format=ROW
2.use web console
And then you can visit πhttp://localhost:8080
username: `root`
password: `mysql`
podman run --rm -p 8080:80 \
-e PMA_ARBITRARY=1 \
-d m.daocloud.io/docker.io/library/phpmyadmin:5.1.1-apache
3.use internal client
podman run --rm \
-e MYSQL_PWD=mysql \
-it m.daocloud.io/docker.io/library/mariadb:11.2.2-jammy \
mariadb \
--host host.containers.internal \
--port 3306 \
--user root \
--database mysql \
--execute 'select version()'
Useful SQL
- list all bin logs
SHOW BINARY LOGS;
- delete previous bin logs
PURGE BINARY LOGS TO 'mysqld-bin.0000003'; # delete mysqld-bin.0000001 and mysqld-bin.0000002
PURGE BINARY LOGS BEFORE 'yyyy-MM-dd HH:mm:ss';
PURGE BINARY LOGS BEFORE DATE_SUB(NOW(), INTERVAL 3 DAY); # delete bin log files older than three days.
If you are using master-slave mode, you can change all BINARY to MASTER
FAQ
Install Milvus
Preliminary
- Kubernetes has installed, if not check link
- argoCD has installed, if not check link
- cert-manager has installed on argocd and the clusterissuer has a named
self-signed-ca-issuer
service, if not check link - minio has installed, if not check link
Steps
1. copy minio credentials secret
kubectl get namespaces database > /dev/null 2>&1 || kubectl create namespace database
kubectl -n storage get secret minio-secret -o json \
| jq 'del(.metadata["namespace","creationTimestamp","resourceVersion","selfLink","uid"])' \
| kubectl -n database apply -f -
2. prepare deploy-milvus.yaml
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: milvus
spec:
syncPolicy:
syncOptions:
- CreateNamespace=true
project: default
source:
repoURL: registry-1.docker.io/bitnamicharts
chart: milvus
targetRevision: 11.2.4
helm:
releaseName: milvus
values: |
global:
security:
allowInsecureImages: true
milvus:
image:
registry: m.lab.zverse.space/docker.io
repository: bitnami/milvus
tag: 2.5.7-debian-12-r0
pullPolicy: IfNotPresent
auth:
enabled: false
initJob:
forceRun: false
image:
registry: m.lab.zverse.space/docker.io
repository: bitnami/pymilvus
tag: 2.5.6-debian-12-r0
pullPolicy: IfNotPresent
resources:
requests:
cpu: 2
memory: 512Mi
limits:
cpu: 2
memory: 2Gi
dataCoord:
replicaCount: 1
resources:
requests:
cpu: 500m
memory: 512Mi
limits:
cpu: 2
memory: 2Gi
metrics:
enabled: true
rootCoord:
replicaCount: 1
resources:
requests:
cpu: 500m
memory: 1Gi
limits:
cpu: 2
memory: 4Gi
queryCoord:
replicaCount: 1
resources:
requests:
cpu: 500m
memory: 1Gi
limits:
cpu: 2
memory: 4Gi
indexCoord:
replicaCount: 1
resources:
requests:
cpu: 500m
memory: 1Gi
limits:
cpu: 2
memory: 4Gi
dataNode:
replicaCount: 1
resources:
requests:
cpu: 500m
memory: 1Gi
limits:
cpu: 2
memory: 4Gi
queryNode:
replicaCount: 1
resources:
requests:
cpu: 500m
memory: 1Gi
limits:
cpu: 2
memory: 2Gi
indexNode:
resources:
requests:
cpu: 500m
memory: 1Gi
limits:
cpu: 2
memory: 2Gi
proxy:
replicaCount: 1
service:
type: ClusterIP
resources:
requests:
cpu: 500m
memory: 1Gi
limits:
cpu: 2
memory: 2Gi
attu:
image:
registry: m.lab.zverse.space/docker.io
repository: bitnami/attu
tag: 2.5.5-debian-12-r1
resources:
requests:
cpu: 500m
memory: 1Gi
limits:
cpu: 2
memory: 4Gi
service:
type: ClusterIP
ingress:
enabled: true
ingressClassName: "nginx"
annotations:
cert-manager.io/cluster-issuer: alidns-webhook-zverse-letsencrypt
hostname: milvus.dev.tech
path: /
pathType: ImplementationSpecific
tls: true
waitContainer:
image:
registry: m.lab.zverse.space/docker.io
repository: bitnami/os-shell
tag: 12-debian-12-r40
pullPolicy: IfNotPresent
resources:
requests:
cpu: 500m
memory: 1Gi
limits:
cpu: 2
memory: 4Gi
externalS3:
host: "minio.storage"
port: 9000
existingSecret: "minio-secret"
existingSecretAccessKeyIDKey: "root-user"
existingSecretKeySecretKey: "root-password"
bucket: "milvus"
rootPath: "file"
etcd:
enabled: true
image:
registry: m.lab.zverse.space/docker.io
replicaCount: 1
auth:
rbac:
create: false
client:
secureTransport: false
resources:
requests:
cpu: 500m
memory: 1Gi
limits:
cpu: 2
memory: 2Gi
persistence:
enabled: true
storageClass: ""
size: 2Gi
preUpgradeJob:
enabled: false
minio:
enabled: false
kafka:
enabled: true
image:
registry: m.lab.zverse.space/docker.io
controller:
replicaCount: 1
livenessProbe:
failureThreshold: 8
resources:
requests:
cpu: 500m
memory: 1Gi
limits:
cpu: 2
memory: 2Gi
persistence:
enabled: true
storageClass: ""
size: 2Gi
service:
ports:
client: 9092
extraConfig: |-
offsets.topic.replication.factor=3
listeners:
client:
protocol: PLAINTEXT
interbroker:
protocol: PLAINTEXT
external:
protocol: PLAINTEXT
sasl:
enabledMechanisms: "PLAIN"
client:
users:
- user
broker:
replicaCount: 0
destination:
server: https://kubernetes.default.svc
namespace: database
3. apply to k8s
kubectl -n argocd apply -f deploy-milvus.yaml
4. sync by argocd
argocd app sync argocd/milvus
5. check Attu WebUI
milvus address: milvus-proxy:19530
milvus database: default
https://milvus.dev.tech:32443/#/
5. [Optional] import data
import data by using sql file
MARIADB_ROOT_PASSWORD=$(kubectl -n database get secret mariadb-credentials -o jsonpath='{.data.mariadb-root-password}' | base64 -d)
POD_NAME=$(kubectl get pod -n database -l "app.kubernetes.io/name=mariadb-tool" -o jsonpath="{.items[0].metadata.name}") \
&& export SQL_FILENAME="Dump20240301.sql" \
&& kubectl -n database cp ${SQL_FILENAME} ${POD_NAME}:/tmp/${SQL_FILENAME} \
&& kubectl -n database exec -it deployment/app-mariadb-tool -- bash -c \
'echo "create database ccds;" | mysql -h mariadb.database -uroot -p$MARIADB_ROOT_PASSWORD' \
&& kubectl -n database exec -it ${POD_NAME} -- bash -c \
"mysql -h mariadb.database -uroot -p\${MARIADB_ROOT_PASSWORD} \
ccds < /tmp/Dump20240301.sql"
6. [Optional] decode password
kubectl -n database get secret mariadb-credentials -o jsonpath='{.data.mariadb-root-password}' | base64 -d
7. [Optional] execute sql in pod
kubectl -n database exec -it xxxx -- bash
mariadb -h 127.0.0.1 -u root -p$MARIADB_ROOT_PASSWORD
And then you can check connection by
show status like 'Threads%';
Install Neo4j
Installation
Preliminary
1. Kubernetes has installed, if not check πlink2. Helm has installed, if not check πlink1.get helm repo
helm repo add ay-helm-mirror https://aaronyang0628.github.io/helm-chart-mirror/charts
helm repo update
2.install chart
helm install ay-helm-mirror/kube-prometheus-stack --generate-name
for more information, you can check πhttps://artifacthub.io/packages/helm/prometheus-community/prometheus
Preliminary
1. Kubernetes has installed, if not check πlink2. Helm has installed, if not check πlink3. ArgoCD has installed, if not check πlink1.prepare `deploy-xxxxx.yaml`
2.apply to k8s
kubectl -n argocd apply -f xxxx.yaml
3.sync by argocd
argocd app sync argocd/xxxx
4.prepare yaml-content.yaml
5.apply to k8s
kubectl apply -f xxxx.yaml
6.apply xxxx.yaml directly
kubectl apply -f - <<EOF
EOF
Preliminary
1. Docker|Podman|Buildah has installed, if not check πlinkyou can run an additional daocloud image to accelerate your pulling, check Daocloud Proxy
1.init server
mkdir -p neo4j/data
podman run --rm \
--name neo4j \
-p 7474:7474 \
-p 7687:7687 \
-e NEO4J_AUTH=neo4j/mysql \
-v $(pwd)/neo4j/data:/data \
-d docker.io/library/neo4j:5.18.0-community-bullseye
Preliminary
1. Kubernetes has installed, if not check πlink2. Helm has installed, if not check πlink3. ArgoCD has installed, if not check πlink4. Argo Workflow has installed, if not check πlink1.prepare `argocd-login-credentials`
kubectl get namespaces database > /dev/null 2>&1 || kubectl create namespace database
2.apply rolebinding to k8s
kubectl apply -f - <<EOF
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: application-administrator
rules:
- apiGroups:
- argoproj.io
resources:
- applications
verbs:
- '*'
- apiGroups:
- apps
resources:
- deployments
verbs:
- '*'
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: application-administration
namespace: argocd
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: application-administrator
subjects:
- kind: ServiceAccount
name: argo-workflow
namespace: business-workflows
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: application-administration
namespace: application
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: application-administrator
subjects:
- kind: ServiceAccount
name: argo-workflow
namespace: business-workflows
EOF
4.prepare `deploy-xxxx-flow.yaml`
6.submit to argo workflow client
argo -n business-workflows submit deploy-xxxx-flow.yaml
7.decode password
kubectl -n application get secret xxxx-credentials -o jsonpath='{.data.xxx-password}' | base64 -d
FAQ
Install Postgresql
Installation
Preliminary
1. Kubernetes has installed, if not check πlink2. Helm has installed, if not check πlink1.get helm repo
helm repo add ay-helm-mirror https://aaronyang0628.github.io/helm-chart-mirror/charts
helm repo update
2.install chart
helm install ay-helm-mirror/kube-prometheus-stack --generate-name
for more information, you can check πhttps://artifacthub.io/packages/helm/prometheus-community/prometheus
Preliminary
1. Kubernetes has installed, if not check πlink2. Helm has installed, if not check πlink3. ArgoCD has installed, if not check πlink1.prepare `deploy-xxxxx.yaml`
2.apply to k8s
kubectl -n argocd apply -f xxxx.yaml
3.sync by argocd
argocd app sync argocd/xxxx
4.prepare yaml-content.yaml
5.apply to k8s
kubectl apply -f xxxx.yaml
6.apply xxxx.yaml directly
kubectl apply -f - <<EOF
EOF
Preliminary
1. Docker|Podman|Buildah has installed, if not check πlinkyou can run an additional daocloud image to accelerate your pulling, check Daocloud Proxy
1.init server
mkdir -p $(pwd)/postgresql/data
podman run --rm \
--name postgresql \
-p 5432:5432 \
-e POSTGRES_PASSWORD=postgresql \
-e PGDATA=/var/lib/postgresql/data/pgdata \
-v $(pwd)/postgresql/data:/var/lib/postgresql/data \
-d docker.io/library/postgres:15.2-alpine3.17
2.use web console
podman run --rm \
-p 8080:80 \
-e 'PGADMIN_DEFAULT_EMAIL=ben.wangz@foxmail.com' \
-e 'PGADMIN_DEFAULT_PASSWORD=123456' \
-d docker.io/dpage/pgadmin4:6.15
3.use internal client
podman run --rm \
--env PGPASSWORD=postgresql \
--entrypoint psql \
-it docker.io/library/postgres:15.2-alpine3.17 \
--host host.containers.internal \
--port 5432 \
--username postgres \
--dbname postgres \
--command 'select version()'
Preliminary
1. Kubernetes has installed, if not check πlink2. Helm has installed, if not check πlink3. ArgoCD has installed, if not check πlink4. Argo Workflow has installed, if not check πlink5. Minio artifact repository has been configured, if not check πlink- endpoint: minio.storage:9000
1.prepare `argocd-login-credentials`
kubectl get namespaces database > /dev/null 2>&1 || kubectl create namespace database
ARGOCD_USERNAME=admin
ARGOCD_PASSWORD=$(kubectl -n argocd get secret argocd-initial-admin-secret -o jsonpath="{.data.password}" | base64 -d)
kubectl -n business-workflows create secret generic argocd-login-credentials \
--from-literal=username=${ARGOCD_USERNAME} \
--from-literal=password=${ARGOCD_PASSWORD}
2.apply rolebinding to k8s
kubectl apply -f - <<EOF
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: application-administrator
rules:
- apiGroups:
- argoproj.io
resources:
- applications
verbs:
- '*'
- apiGroups:
- apps
resources:
- deployments
verbs:
- '*'
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: application-administration
namespace: argocd
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: application-administrator
subjects:
- kind: ServiceAccount
name: argo-workflow
namespace: business-workflows
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: application-administration
namespace: application
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: application-administrator
subjects:
- kind: ServiceAccount
name: argo-workflow
namespace: business-workflows
EOF
3.prepare postgresql admin credentials secret
kubectl -n application create secret generic postgresql-credentials \
--from-literal=postgres-password=$(tr -dc A-Za-z0-9 </dev/urandom | head -c 16) \
--from-literal=password=$(tr -dc A-Za-z0-9 </dev/urandom | head -c 16) \
--from-literal=replication-password=$(tr -dc A-Za-z0-9 </dev/urandom | head -c 16)
4.prepare `deploy-postgresql-flow.yaml`
apiVersion: argoproj.io/v1alpha1
kind: Workflow
metadata:
generateName: deploy-argocd-app-pg-
spec:
entrypoint: entry
artifactRepositoryRef:
configmap: artifact-repositories
key: default-artifact-repository
serviceAccountName: argo-workflow
templates:
- name: entry
inputs:
parameters:
- name: argocd-server
value: argo-cd-argocd-server.argocd:443
- name: insecure-option
value: --insecure
dag:
tasks:
- name: apply
template: apply
- name: prepare-argocd-binary
template: prepare-argocd-binary
dependencies:
- apply
- name: sync
dependencies:
- prepare-argocd-binary
template: sync
arguments:
artifacts:
- name: argocd-binary
from: "{{tasks.prepare-argocd-binary.outputs.artifacts.argocd-binary}}"
parameters:
- name: argocd-server
value: "{{inputs.parameters.argocd-server}}"
- name: insecure-option
value: "{{inputs.parameters.insecure-option}}"
- name: wait
dependencies:
- sync
template: wait
arguments:
artifacts:
- name: argocd-binary
from: "{{tasks.prepare-argocd-binary.outputs.artifacts.argocd-binary}}"
parameters:
- name: argocd-server
value: "{{inputs.parameters.argocd-server}}"
- name: insecure-option
value: "{{inputs.parameters.insecure-option}}"
- name: init-db-tool
template: init-db-tool
dependencies:
- wait
- name: apply
resource:
action: apply
manifest: |
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: app-postgresql
namespace: argocd
spec:
syncPolicy:
syncOptions:
- CreateNamespace=true
project: default
source:
repoURL: https://charts.bitnami.com/bitnami
chart: postgresql
targetRevision: 14.2.2
helm:
releaseName: app-postgresql
values: |
architecture: standalone
auth:
database: geekcity
username: aaron.yang
existingSecret: postgresql-credentials
primary:
persistence:
enabled: false
readReplicas:
replicaCount: 1
persistence:
enabled: false
backup:
enabled: false
image:
registry: m.daocloud.io/docker.io
pullPolicy: IfNotPresent
volumePermissions:
enabled: false
image:
registry: m.daocloud.io/docker.io
pullPolicy: IfNotPresent
metrics:
enabled: false
image:
registry: m.daocloud.io/docker.io
pullPolicy: IfNotPresent
destination:
server: https://kubernetes.default.svc
namespace: application
- name: prepare-argocd-binary
inputs:
artifacts:
- name: argocd-binary
path: /tmp/argocd
mode: 755
http:
url: https://files.m.daocloud.io/github.com/argoproj/argo-cd/releases/download/v2.9.3/argocd-linux-amd64
outputs:
artifacts:
- name: argocd-binary
path: "{{inputs.artifacts.argocd-binary.path}}"
container:
image: m.daocloud.io/docker.io/library/fedora:39
command:
- sh
- -c
args:
- |
ls -l {{inputs.artifacts.argocd-binary.path}}
- name: sync
inputs:
artifacts:
- name: argocd-binary
path: /usr/local/bin/argocd
parameters:
- name: argocd-server
- name: insecure-option
value: ""
container:
image: m.daocloud.io/docker.io/library/fedora:39
env:
- name: ARGOCD_USERNAME
valueFrom:
secretKeyRef:
name: argocd-login-credentials
key: username
- name: ARGOCD_PASSWORD
valueFrom:
secretKeyRef:
name: argocd-login-credentials
key: password
- name: WITH_PRUNE_OPTION
value: --prune
command:
- sh
- -c
args:
- |
set -e
export ARGOCD_SERVER={{inputs.parameters.argocd-server}}
export INSECURE_OPTION={{inputs.parameters.insecure-option}}
export ARGOCD_USERNAME=${ARGOCD_USERNAME:-admin}
argocd login ${INSECURE_OPTION} --username ${ARGOCD_USERNAME} --password ${ARGOCD_PASSWORD} ${ARGOCD_SERVER}
argocd app sync argocd/app-postgresql ${WITH_PRUNE_OPTION} --timeout 300
- name: wait
inputs:
artifacts:
- name: argocd-binary
path: /usr/local/bin/argocd
parameters:
- name: argocd-server
- name: insecure-option
value: ""
container:
image: m.daocloud.io/docker.io/library/fedora:39
env:
- name: ARGOCD_USERNAME
valueFrom:
secretKeyRef:
name: argocd-login-credentials
key: username
- name: ARGOCD_PASSWORD
valueFrom:
secretKeyRef:
name: argocd-login-credentials
key: password
command:
- sh
- -c
args:
- |
set -e
export ARGOCD_SERVER={{inputs.parameters.argocd-server}}
export INSECURE_OPTION={{inputs.parameters.insecure-option}}
export ARGOCD_USERNAME=${ARGOCD_USERNAME:-admin}
argocd login ${INSECURE_OPTION} --username ${ARGOCD_USERNAME} --password ${ARGOCD_PASSWORD} ${ARGOCD_SERVER}
argocd app wait argocd/app-postgresql
- name: init-db-tool
resource:
action: apply
manifest: |
apiVersion: apps/v1
kind: Deployment
metadata:
name: app-postgresql-tool
namespace: application
labels:
app.kubernetes.io/name: postgresql-tool
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: postgresql-tool
template:
metadata:
labels:
app.kubernetes.io/name: postgresql-tool
spec:
containers:
- name: postgresql-tool
image: m.daocloud.io/docker.io/bitnami/postgresql:14.4.0-debian-11-r9
imagePullPolicy: IfNotPresent
env:
- name: POSTGRES_PASSWORD
valueFrom:
secretKeyRef:
key: postgres-password
name: postgresql-credentials
- name: TZ
value: Asia/Shanghai
command:
- tail
args:
- -f
- /etc/hosts
6.submit to argo workflow client
argo -n business-workflows submit deploy-postgresql-flow.yaml
7.decode password
kubectl -n application get secret postgresql-credentials -o jsonpath='{.data.postgres-password}' | base64 -d
8.import data
POSTGRES_PASSWORD=$(kubectl -n application get secret postgresql-credentials -o jsonpath='{.data.postgres-password}' | base64 -d) \
POD_NAME=$(kubectl get pod -n application -l "app.kubernetes.io/name=postgresql-tool" -o jsonpath="{.items[0].metadata.name}") \
&& export SQL_FILENAME="init_dfs_table_data.sql" \
&& kubectl -n application cp ${SQL_FILENAME} ${POD_NAME}:/tmp/${SQL_FILENAME} \
&& kubectl -n application exec -it deployment/app-postgresql-tool -- bash -c \
'echo "CREATE DATABASE csst;" | PGPASSWORD="$POSTGRES_PASSWORD" \
psql --host app-postgresql.application -U postgres -d postgres -p 5432' \
&& kubectl -n application exec -it deployment/app-postgresql-tool -- bash -c \
'PGPASSWORD="$POSTGRES_PASSWORD" psql --host app-postgresql.application \
-U postgres -d csst -p 5432 < /tmp/init_dfs_table_data.sql'
FAQ
Install Redis
Installation
Preliminary
1. Kubernetes has installed, if not check πlink2. Helm has installed, if not check πlink1.get helm repo
helm repo add ay-helm-mirror https://aaronyang0628.github.io/helm-chart-mirror/charts
helm repo update
2.install chart
helm install ay-helm-mirror/kube-prometheus-stack --generate-name
for more information, you can check πhttps://artifacthub.io/packages/helm/prometheus-community/prometheus
Preliminary
1. Kubernetes has installed, if not check πlink2. Helm has installed, if not check πlink3. ArgoCD has installed, if not check πlink1.prepare `deploy-xxxxx.yaml`
2.apply to k8s
kubectl -n argocd apply -f xxxx.yaml
3.sync by argocd
argocd app sync argocd/xxxx
4.prepare yaml-content.yaml
5.apply to k8s
kubectl apply -f xxxx.yaml
6.apply xxxx.yaml directly
kubectl apply -f - <<EOF
EOF
Preliminary
1. Docker|Podman|Buildah has installed, if not check πlinkyou can run an additional daocloud image to accelerate your pulling, check Daocloud Proxy
1.init server
mkdir -p $(pwd)/redis/data
podman run --rm \
--name redis \
-p 6379:6379 \
-d docker.io/library/redis:7.2.4-alpine
2.use internal client
podman run --rm \
-it docker.io/library/redis:7.2.4-alpine \
redis-cli \
-h host.containers.internal \
set mykey somevalue
Preliminary
1. Kubernetes has installed, if not check πlink2. Helm has installed, if not check πlink3. ArgoCD has installed, if not check πlink4. Argo Workflow has installed, if not check πlink5. Minio artifact repository has been configured, if not check πlink- endpoint: minio.storage:9000
1.prepare `argocd-login-credentials`
ARGOCD_USERNAME=admin
ARGOCD_PASSWORD=$(kubectl -n argocd get secret argocd-initial-admin-secret -o jsonpath="{.data.password}" | base64 -d)
kubectl -n business-workflows create secret generic argocd-login-credentials \
--from-literal=username=${ARGOCD_USERNAME} \
--from-literal=password=${ARGOCD_PASSWORD}
2.apply rolebinding to k8s
kubectl apply -f - <<EOF
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: application-administrator
rules:
- apiGroups:
- argoproj.io
resources:
- applications
verbs:
- '*'
- apiGroups:
- apps
resources:
- deployments
verbs:
- '*'
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: application-administration
namespace: argocd
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: application-administrator
subjects:
- kind: ServiceAccount
name: argo-workflow
namespace: business-workflows
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: application-administration
namespace: application
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: application-administrator
subjects:
- kind: ServiceAccount
name: argo-workflow
namespace: business-workflows
EOF
3.prepare redis credentials secret
kubectl -n application create secret generic redis-credentials \
--from-literal=redis-password=$(tr -dc A-Za-z0-9 </dev/urandom | head -c 16)
4.prepare `deploy-redis-flow.yaml`
apiVersion: argoproj.io/v1alpha1
kind: Workflow
metadata:
generateName: deploy-argocd-app-redis-
spec:
entrypoint: entry
artifactRepositoryRef:
configmap: artifact-repositories
key: default-artifact-repository
serviceAccountName: argo-workflow
templates:
- name: entry
inputs:
parameters:
- name: argocd-server
value: argocd-server.argocd:443
- name: insecure-option
value: --insecure
dag:
tasks:
- name: apply
template: apply
- name: prepare-argocd-binary
template: prepare-argocd-binary
dependencies:
- apply
- name: sync
dependencies:
- prepare-argocd-binary
template: sync
arguments:
artifacts:
- name: argocd-binary
from: "{{tasks.prepare-argocd-binary.outputs.artifacts.argocd-binary}}"
parameters:
- name: argocd-server
value: "{{inputs.parameters.argocd-server}}"
- name: insecure-option
value: "{{inputs.parameters.insecure-option}}"
- name: wait
dependencies:
- sync
template: wait
arguments:
artifacts:
- name: argocd-binary
from: "{{tasks.prepare-argocd-binary.outputs.artifacts.argocd-binary}}"
parameters:
- name: argocd-server
value: "{{inputs.parameters.argocd-server}}"
- name: insecure-option
value: "{{inputs.parameters.insecure-option}}"
- name: apply
resource:
action: apply
manifest: |
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: app-redis
namespace: argocd
spec:
syncPolicy:
syncOptions:
- CreateNamespace=true
project: default
source:
repoURL: https://charts.bitnami.com/bitnami
chart: redis
targetRevision: 18.16.0
helm:
releaseName: app-redis
values: |
architecture: replication
auth:
enabled: true
sentinel: true
existingSecret: redis-credentials
master:
count: 1
disableCommands:
- FLUSHDB
- FLUSHALL
persistence:
enabled: false
replica:
replicaCount: 3
disableCommands:
- FLUSHDB
- FLUSHALL
persistence:
enabled: false
image:
registry: m.daocloud.io/docker.io
pullPolicy: IfNotPresent
sentinel:
enabled: false
persistence:
enabled: false
image:
registry: m.daocloud.io/docker.io
pullPolicy: IfNotPresent
metrics:
enabled: false
image:
registry: m.daocloud.io/docker.io
pullPolicy: IfNotPresent
volumePermissions:
enabled: false
image:
registry: m.daocloud.io/docker.io
pullPolicy: IfNotPresent
sysctl:
enabled: false
image:
registry: m.daocloud.io/docker.io
pullPolicy: IfNotPresent
destination:
server: https://kubernetes.default.svc
namespace: application
- name: prepare-argocd-binary
inputs:
artifacts:
- name: argocd-binary
path: /tmp/argocd
mode: 755
http:
url: https://files.m.daocloud.io/github.com/argoproj/argo-cd/releases/download/v2.9.3/argocd-linux-amd64
outputs:
artifacts:
- name: argocd-binary
path: "{{inputs.artifacts.argocd-binary.path}}"
container:
image: m.daocloud.io/docker.io/library/fedora:39
command:
- sh
- -c
args:
- |
ls -l {{inputs.artifacts.argocd-binary.path}}
- name: sync
inputs:
artifacts:
- name: argocd-binary
path: /usr/local/bin/argocd
parameters:
- name: argocd-server
- name: insecure-option
value: ""
container:
image: m.daocloud.io/docker.io/library/fedora:39
env:
- name: ARGOCD_USERNAME
valueFrom:
secretKeyRef:
name: argocd-login-credentials
key: username
- name: ARGOCD_PASSWORD
valueFrom:
secretKeyRef:
name: argocd-login-credentials
key: password
- name: WITH_PRUNE_OPTION
value: --prune
command:
- sh
- -c
args:
- |
set -e
export ARGOCD_SERVER={{inputs.parameters.argocd-server}}
export INSECURE_OPTION={{inputs.parameters.insecure-option}}
export ARGOCD_USERNAME=${ARGOCD_USERNAME:-admin}
argocd login ${INSECURE_OPTION} --username ${ARGOCD_USERNAME} --password ${ARGOCD_PASSWORD} ${ARGOCD_SERVER}
argocd app sync argocd/app-redis ${WITH_PRUNE_OPTION} --timeout 300
- name: wait
inputs:
artifacts:
- name: argocd-binary
path: /usr/local/bin/argocd
parameters:
- name: argocd-server
- name: insecure-option
value: ""
container:
image: m.daocloud.io/docker.io/library/fedora:39
env:
- name: ARGOCD_USERNAME
valueFrom:
secretKeyRef:
name: argocd-login-credentials
key: username
- name: ARGOCD_PASSWORD
valueFrom:
secretKeyRef:
name: argocd-login-credentials
key: password
command:
- sh
- -c
args:
- |
set -e
export ARGOCD_SERVER={{inputs.parameters.argocd-server}}
export INSECURE_OPTION={{inputs.parameters.insecure-option}}
export ARGOCD_USERNAME=${ARGOCD_USERNAME:-admin}
argocd login ${INSECURE_OPTION} --username ${ARGOCD_USERNAME} --password ${ARGOCD_PASSWORD} ${ARGOCD_SERVER}
argocd app wait argocd/app-redis
6.submit to argo workflow client
argo -n business-workflows submit deploy-redis-flow.yaml
7.decode password
kubectl -n application get secret redis-credentials -o jsonpath='{.data.redis-password}' | base64 -d