Subsections of Storage
Deploy Artifact Repository
Preliminary
- Kubernetes has been installed, if not check link
- minio is ready for artifact repository
endpoint:
minio.storage:9000
Steps
1. prepare bucket for s3 artifact repository
# K8S_MASTER_IP could be your master IP or the load balancer's external IP
K8S_MASTER_IP=172.27.253.27
MINIO_ACCESS_SECRET=$(kubectl -n storage get secret minio-secret -o jsonpath='{.data.root-password}' | base64 -d)
podman run --rm \
--entrypoint bash \
--add-host=minio-api.dev.geekcity.tech:${K8S_MASTER_IP} \
-it docker.io/minio/mc:latest \
-c "mc alias set minio http://minio-api.dev.geekcity.tech admin ${MINIO_ACCESS_SECRET} \
&& mc ls minio \
&& mc mb --ignore-existing minio/argo-workflows-artifacts"
2. prepare secret s3-artifact-repository-credentials
will create business-workflows namespace
MINIO_ACCESS_KEY=$(kubectl -n storage get secret minio-secret -o jsonpath='{.data.root-user}' | base64 -d)
kubectl -n business-workflows create secret generic s3-artifact-repository-credentials \
--from-literal=accessKey=${MINIO_ACCESS_KEY} \
--from-literal=secretKey=${MINIO_ACCESS_SECRET}
3. prepare configMap artifact-repositories.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: artifact-repositories
annotations:
workflows.argoproj.io/default-artifact-repository: default-artifact-repository
data:
default-artifact-repository: |
s3:
endpoint: minio.storage:9000
insecure: true
accessKeySecret:
name: s3-artifact-repository-credentials
key: accessKey
secretKeySecret:
name: s3-artifact-repository-credentials
key: secretKey
bucket: argo-workflows-artifacts
4. apply artifact-repositories.yaml
to k8s
kubectl -n business-workflows apply -f artifact-repositories.yaml
Install Minio
Installation
Preliminary
1. Kubernetes has been installed, if not check link
2. ArgoCD has been installed, if not check link
3. Ingress has been installed on ArgoCD, if not check link
4. Cert-manager has been installed on ArgoCD and the cluster issuer has a service named `self-signed-ca-issuer`, if not check link
1.prepare minio credentials secret
Details
kubectl get namespaces storage > /dev/null 2>&1 || kubectl create namespace storage
kubectl -n storage create secret generic minio-secret \
--from-literal=root-user=admin \
--from-literal=root-password=$(tr -dc A-Za-z0-9 </dev/urandom | head -c 16)
2.prepare `deploy-minio.yaml`
Details
kubectl -n argocd apply -f - << EOF
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: minio
spec:
syncPolicy:
syncOptions:
- CreateNamespace=true
project: default
source:
repoURL: https://aaronyang0628.github.io/helm-chart-mirror/charts
chart: minio
targetRevision: 16.0.10
helm:
releaseName: minio
values: |
global:
imageRegistry: "m.daocloud.io/docker.io"
imagePullSecrets: []
storageClass: ""
security:
allowInsecureImages: true
compatibility:
openshift:
adaptSecurityContext: auto
image:
registry: m.daocloud.io/docker.io
repository: bitnami/minio
clientImage:
registry: m.daocloud.io/docker.io
repository: bitnami/minio-client
mode: standalone
defaultBuckets: ""
auth:
# rootUser: admin
# rootPassword: ""
existingSecret: "minio-secret"
statefulset:
updateStrategy:
type: RollingUpdate
podManagementPolicy: Parallel
replicaCount: 1
zones: 1
drivesPerNode: 1
resourcesPreset: "micro"
resources:
requests:
memory: 512Mi
cpu: 250m
limits:
memory: 512Mi
cpu: 250m
ingress:
enabled: true
ingressClassName: "nginx"
hostname: minio-console.ay.dev
path: /?(.*)
pathType: ImplementationSpecific
annotations:
kubernetes.io/ingress.class: nginx
nginx.ingress.kubernetes.io/rewrite-target: /$1
cert-manager.io/cluster-issuer: self-signed-ca-issuer
tls: true
selfSigned: true
extraHosts: []
apiIngress:
enabled: true
ingressClassName: "nginx"
hostname: minio-api.ay.dev
path: /?(.*)
pathType: ImplementationSpecific
annotations:
kubernetes.io/ingress.class: nginx
nginx.ingress.kubernetes.io/rewrite-target: /$1
cert-manager.io/cluster-issuer: self-signed-ca-issuer
tls: true
selfSigned: true
extraHosts: []
persistence:
enabled: false
storageClass: ""
mountPath: /bitnami/minio/data
accessModes:
- ReadWriteOnce
size: 8Gi
annotations: {}
existingClaim: ""
metrics:
prometheusAuthType: public
enabled: false
serviceMonitor:
enabled: false
namespace: ""
labels: {}
jobLabel: ""
paths:
- /minio/v2/metrics/cluster
- /minio/v2/metrics/node
interval: 30s
scrapeTimeout: ""
honorLabels: false
prometheusRule:
enabled: false
namespace: ""
additionalLabels: {}
rules: []
destination:
server: https://kubernetes.default.svc
namespace: storage
EOF
3.sync by argocd
Details
argocd app sync argocd/minio
4.decode minio secret
Details
kubectl -n storage get secret minio-secret -o jsonpath='{.data.root-password}' | base64 -d
5.visit web console
Login Credentials
add $K8S_MASTER_IP minio-console.ay.dev
to /etc/hosts
address: http://minio-console.ay.dev:32080/login
access key:
admin
secret key: ``
6.using mc
Details
K8S_MASTER_IP=$(kubectl get node -l node-role.kubernetes.io/control-plane -o jsonpath='{.items[0].status.addresses[?(@.type=="InternalIP")].address}')
MINIO_ACCESS_SECRET=$(kubectl -n storage get secret minio-secret -o jsonpath='{.data.root-password}' | base64 -d)
podman run --rm \
--entrypoint bash \
--add-host=minio-api.dev.tech:${K8S_MASTER_IP} \
-it m.daocloud.io/docker.io/minio/mc:latest \
-c "mc alias set minio http://minio-api.dev.tech:32080 admin ${MINIO_ACCESS_SECRET} \
&& mc ls minio \
&& mc mb --ignore-existing minio/test \
&& mc cp /etc/hosts minio/test/etc/hosts \
&& mc ls --recursive minio"
Details
K8S_MASTER_IP=$(kubectl get node -l node-role.kubernetes.io/control-plane -o jsonpath='{.items[0].status.addresses[?(@.type=="InternalIP")].address}')
MINIO_ACCESS_SECRET=$(kubectl -n storage get secret minio-secret -o jsonpath='{.data.root-password}' | base64 -d)
podman run --rm \
--entrypoint bash \
--add-host=minio-api.dev.tech:${K8S_MASTER_IP} \
-it m.daocloud.io/docker.io/minio/mc:latest
Preliminary
1. Docker has been installed, if not check link
Using Proxy
you can run an additional daocloud image to accelerate your pulling, check Daocloud Proxy
1.init server
Details
mkdir -p $(pwd)/minio/data
podman run --rm \
--name minio-server \
-p 9000:9000 \
-p 9001:9001 \
-v $(pwd)/minio/data:/data \
-d docker.io/minio/minio:latest server /data --console-address :9001
2.use web console
And then you can visit http://localhost:9001
username: `minioadmin`
password: `minioadmin`
3.use internal client
Details
podman run --rm \
--entrypoint bash \
-it docker.io/minio/mc:latest \
-c "mc alias set minio http://host.docker.internal:9000 minioadmin minioadmin \
&& mc ls minio \
&& mc mb --ignore-existing minio/test \
&& mc cp /etc/hosts minio/test/etc/hosts \
&& mc ls --recursive minio"
FAQ
Install NFS
Installation
Preliminary
1. Kubernetes has been installed, if not check link
2. ArgoCD has been installed, if not check link
3. Ingress has been installed on ArgoCD, if not check link
1.prepare `nfs-provisioner.yaml`
Details
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: nfs-provisioner
spec:
syncPolicy:
syncOptions:
- CreateNamespace=true
project: default
source:
repoURL: https://kubernetes-sigs.github.io/nfs-subdir-external-provisioner
chart: nfs-subdir-external-provisioner
targetRevision: 4.0.18
helm:
releaseName: nfs-provisioner
values: |
image:
repository: m.daocloud.io/registry.k8s.io/sig-storage/nfs-subdir-external-provisioner
pullPolicy: IfNotPresent
nfs:
server: nfs.services.test
path: /
mountOptions:
- vers=4
- minorversion=0
- rsize=1048576
- wsize=1048576
- hard
- timeo=600
- retrans=2
- noresvport
volumeName: nfs-subdir-external-provisioner-nas
reclaimPolicy: Retain
storageClass:
create: true
defaultClass: true
name: nfs-external-nas
destination:
server: https://kubernetes.default.svc
namespace: storage
3.deploy nfs-provisioner
Details
kubectl -n argocd apply -f nfs-provisioner.yaml
4.sync by argocd
Details
argocd app sync argocd/nfs-provisioner
Preliminary
1. Docker has been installed, if not check link
Using Proxy
you can run an additional daocloud image to accelerate your pulling, check Daocloud Proxy
1.init server
Details
echo -e "nfs\nnfsd" > /etc/modules-load.d/nfs4.conf
modprobe nfs && modprobe nfsd
mkdir -p $(pwd)/data/nfs/data
echo '/data *(rw,fsid=0,no_subtree_check,insecure,no_root_squash)' > $(pwd)/data/nfs/exports
podman run \
--name nfs4 \
--rm \
--privileged \
-p 2049:2049 \
-v $(pwd)/data/nfs/data:/data \
-v $(pwd)/data/nfs/exports:/etc/exports:ro \
-d docker.io/erichough/nfs-server:2.2.1
Preliminary
1. centos yum repo source has been updated, if not check link
1.install nfs util
sudo apt update -y
sudo apt-get install nfs-common
dnf update -y
dnf install -y nfs-utils rpcbind
sudo apt update -y
sudo apt-get install nfs-common
2. create share folder
Details
mkdir /data && chmod 755 /data
3.edit `/etc/exports`
Details
/data *(rw,sync,insecure,no_root_squash,no_subtree_check)
4.start nfs server
Details
systemctl enable rpcbind
systemctl enable nfs-server
systemctl start rpcbind
systemctl start nfs-server
5.test load on localhost
Details
showmount -e localhost
6.test load on other ip
Details
showmount -e 192.168.aa.bb
7.mount nfs disk
Details
mkdir -p $(pwd)/mnt/nfs
sudo mount -v 192.168.aa.bb:/data $(pwd)/mnt/nfs -o proto=tcp -o nolock
8.set nfs auto mount
Details
echo "192.168.aa.bb:/data /data nfs rw,auto,nofail,noatime,nolock,intr,tcp,actimeo=1800 0 0" >> /etc/fstab
df -h
Notes
[Optional] create new partition
fdisk /dev/vdb
# n
# p
# w
parted
#select /dev/vdb
#mklabel gpt
#mkpart primary 0 -1
#Cancel
#mkpart primary 0% 100%
#print
[Optional]Format disk
mkfs.xfs /dev/vdb1 -f
[Optional] mount disk to folder
mount /dev/vdb1 /data
[Optional] mount when restart
#vim `/etc/fstab`
/dev/vdb1 /data xfs defaults 0 0
FAQ
Install Redis
Installation
Preliminary
1. Kubernetes has been installed, if not check link
2. Helm has been installed, if not check link
1.get helm repo
Details
helm repo add ay-helm-mirror https://aaronyang0628.github.io/helm-chart-mirror/charts
helm repo update
2.install chart
Details
helm install ay-helm-mirror/kube-prometheus-stack --generate-name
Using Proxy
for more information, you can check https://artifacthub.io/packages/helm/prometheus-community/prometheus
Preliminary
1. Kubernetes has been installed, if not check link
2. Helm has been installed, if not check link
3. ArgoCD has been installed, if not check link
1.prepare redis secret
Details
kubectl get namespaces storage > /dev/null 2>&1 || kubectl create namespace storage
kubectl -n storage create secret generic redis-credentials \
--from-literal=redis-password=$(tr -dc A-Za-z0-9 </dev/urandom | head -c 16)
2.prepare `deploy-redis.yaml`
Details
kubectl -n argocd apply -f - << EOF
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: redis
spec:
syncPolicy:
syncOptions:
- CreateNamespace=true
project: default
source:
repoURL: https://charts.bitnami.com/bitnami
chart: redis
targetRevision: 18.16.0
helm:
releaseName: redis
values: |
architecture: replication
auth:
enabled: true
sentinel: true
existingSecret: redis-credentials
master:
count: 1
disableCommands:
- FLUSHDB
- FLUSHALL
persistence:
enabled: true
storageClass: nfs-external
size: 8Gi
replica:
replicaCount: 3
disableCommands:
- FLUSHDB
- FLUSHALL
persistence:
enabled: true
storageClass: nfs-external
size: 8Gi
image:
registry: m.daocloud.io/docker.io
pullPolicy: IfNotPresent
sentinel:
enabled: false
persistence:
enabled: false
image:
registry: m.daocloud.io/docker.io
pullPolicy: IfNotPresent
metrics:
enabled: false
image:
registry: m.daocloud.io/docker.io
pullPolicy: IfNotPresent
volumePermissions:
enabled: false
image:
registry: m.daocloud.io/docker.io
pullPolicy: IfNotPresent
sysctl:
enabled: false
image:
registry: m.daocloud.io/docker.io
pullPolicy: IfNotPresent
extraDeploy:
- |
apiVersion: apps/v1
kind: Deployment
metadata:
name: redis-tool
namespace: csst
labels:
app.kubernetes.io/name: redis-tool
spec:
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: redis-tool
template:
metadata:
labels:
app.kubernetes.io/name: redis-tool
spec:
containers:
- name: redis-tool
image: m.daocloud.io/docker.io/bitnami/redis:7.2.4-debian-12-r8
imagePullPolicy: IfNotPresent
env:
- name: REDISCLI_AUTH
valueFrom:
secretKeyRef:
key: redis-password
name: redis-credentials
- name: TZ
value: Asia/Shanghai
command:
- tail
- -f
- /etc/hosts
destination:
server: https://kubernetes.default.svc
namespace: storage
EOF
3.sync by argocd
Details
argocd app sync argocd/redis
4.decode password
Details
kubectl -n storage get secret redis-credentials -o jsonpath='{.data.redis-password}' | base64 -d
Preliminary
1. Docker|Podman|Buildah has been installed, if not check link
Using Proxy
you can run an additional daocloud image to accelerate your pulling, check Daocloud Proxy
1.init server
Details
Preliminary
1. Kubernetes has been installed, if not check link
2. Helm has been installed, if not check link
3. ArgoCD has been installed, if not check link
4. Argo Workflow has been installed, if not check link
1.prepare `argocd-login-credentials`
Details
kubectl get namespaces database > /dev/null 2>&1 || kubectl create namespace database
2.apply rolebinding to k8s
Details
kubectl apply -f - <<EOF
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: application-administrator
rules:
- apiGroups:
- argoproj.io
resources:
- applications
verbs:
- '*'
- apiGroups:
- apps
resources:
- deployments
verbs:
- '*'
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: application-administration
namespace: argocd
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: application-administrator
subjects:
- kind: ServiceAccount
name: argo-workflow
namespace: business-workflows
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: application-administration
namespace: application
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: application-administrator
subjects:
- kind: ServiceAccount
name: argo-workflow
namespace: business-workflows
EOF
4.prepare `deploy-xxxx-flow.yaml`
Details
6.submit to argo workflow client
Details
argo -n business-workflows submit deploy-xxxx-flow.yaml
7.decode password
Details
kubectl -n application get secret xxxx-credentials -o jsonpath='{.data.xxx-password}' | base64 -d
FAQ
tests
kubectl -n storage exec -it deployment/redis-tool -- redis-cli -c -h redis-master.storage ping
kubectl -n storage exec -it deployment/redis-tool -- redis-cli -c -h redis-master.storage set mykey somevalue
kubectl -n storage exec -it deployment/redis-tool -- redis-cli -c -h redis-master.storage get mykey
kubectl -n storage exec -it deployment/redis-tool -- redis-cli -c -h redis-master.storage del mykey
kubectl -n storage exec -it deployment/redis-tool -- redis-cli -c -h redis-master.storage get mykey