Subsections of Networking
Install Ingress
Installation
Install By
Preliminary
1. Kubernetes has been installed; if not, check the link
2. Helm has been installed; if not, check the link
helm repo add ay-helm-mirror https://aaronyang0628.github.io/helm-chart-mirror/charts
helm repo update
helm install ay-helm-mirror/ingress-nginx --generate-name
For more information, you can check https://artifacthub.io/packages/helm/ingress-nginx/ingress-nginx
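Once the controller is running, you can point an Ingress at it. A minimal sketch, assuming the ingress class is named `nginx` and a hypothetical Service `my-service` already listens on port 80 in the same namespace:

```shell
# Hypothetical example: route my.example.com to an existing Service named my-service.
kubectl apply -f - << EOF
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: my-ingress
spec:
  ingressClassName: nginx
  rules:
    - host: my.example.com
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: my-service
                port:
                  number: 80
EOF
```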
Preliminary
1. Kubernetes has been installed; if not, check the link
2. ArgoCD has been installed; if not, check the link

1. Prepare `ingress-nginx.yaml`
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: ingress-nginx
spec:
  syncPolicy:
    syncOptions:
      - CreateNamespace=true
  project: default
  source:
    repoURL: https://aaronyang0628.github.io/helm-chart-mirror/charts
    chart: ingress-nginx
    targetRevision: 4.11.3
    helm:
      releaseName: ingress-nginx
      values: |
        controller:
          image:
            registry: m.daocloud.io
            image: registry.k8s.io/ingress-nginx/controller
            tag: "v1.9.5"
            pullPolicy: IfNotPresent
          service:
            enabled: true
            type: NodePort
            nodePorts:
              http: 32080
              https: 32443
              tcp:
                8080: 32808
          admissionWebhooks:
            enabled: true
            patch:
              enabled: true
              image:
                registry: m.daocloud.io
                image: registry.k8s.io/ingress-nginx/kube-webhook-certgen
                tag: v20231011-8b53cabe0
                pullPolicy: IfNotPresent
        defaultBackend:
          enabled: false
  destination:
    server: https://kubernetes.default.svc
    namespace: basic-components
kubectl -n argocd apply -f ingress-nginx.yaml
argocd app sync argocd/ingress-nginx
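To confirm the rollout, check the Application status and the controller pods; the namespace and NodePorts below come from the manifest above, and `<node-ip>` is a placeholder for any node's address:

```shell
# The ArgoCD Application should report Synced/Healthy.
argocd app get argocd/ingress-nginx

# Controller pod and NodePort service live in the destination namespace.
kubectl -n basic-components get pods,svc

# HTTP is exposed on NodePort 32080 of every node.
curl -I http://<node-ip>:32080
```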
Install based on Docker
echo "start from head is important"
FAQ
Install Istio
Installation
Install By
Preliminary
1. Kubernetes has been installed; if not, check the link
2. Helm has been installed; if not, check the link

1. Get the Helm repo
helm repo add istio https://istio-release.storage.googleapis.com/charts
helm repo update
2. Install the charts
helm install istio-base istio/base -n istio-system --create-namespace
helm install istiod istio/istiod -n istio-system --wait
Using Proxy
For more information, you can check https://istio.io/latest/docs/setup/install/helm/
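A quick sanity check once the charts are installed (release and namespace names as used above):

```shell
# Both releases should show as deployed.
helm ls -n istio-system

# istiod should reach Running.
kubectl -n istio-system get pods

# The base chart installs the Istio CRDs.
kubectl get crd | grep 'istio.io'
```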
Preliminary
1. Kubernetes has been installed; if not, check the link
2. Helm has been installed; if not, check the link
3. ArgoCD has been installed; if not, check the link

1. Prepare `deploy-istio-base.yaml`
kubectl -n argocd apply -f - << EOF
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: istio-base
spec:
  syncPolicy:
    syncOptions:
      - CreateNamespace=true
  project: default
  source:
    repoURL: https://istio-release.storage.googleapis.com/charts
    chart: base
    targetRevision: 1.23.2
    helm:
      releaseName: istio-base
      values: |
        defaults:
          global:
            istioNamespace: istio-system
          base:
            enableCRDTemplates: false
            enableIstioConfigCRDs: true
          defaultRevision: "default"
  destination:
    server: https://kubernetes.default.svc
    namespace: istio-system
EOF
2. Sync by ArgoCD
argocd app sync argocd/istio-base
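Before moving on to istiod, it is worth confirming that the base chart's CRDs reached the cluster:

```shell
# The ArgoCD Application should report Synced/Healthy.
argocd app get argocd/istio-base

# The base chart installs the Istio CRDs.
kubectl get crd | grep 'istio.io'
```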
3. Prepare `deploy-istiod.yaml`
kubectl -n argocd apply -f - << EOF
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: istiod
spec:
  syncPolicy:
    syncOptions:
      - CreateNamespace=true
  project: default
  source:
    repoURL: https://istio-release.storage.googleapis.com/charts
    chart: istiod
    targetRevision: 1.23.2
    helm:
      releaseName: istiod
      values: |
        defaults:
          global:
            istioNamespace: istio-system
            defaultResources:
              requests:
                cpu: 10m
                memory: 128Mi
              limits:
                cpu: 100m
                memory: 128Mi
            hub: m.daocloud.io/docker.io/istio
            proxy:
              autoInject: disabled
              resources:
                requests:
                  cpu: 100m
                  memory: 128Mi
                limits:
                  cpu: 2000m
                  memory: 1024Mi
          pilot:
            autoscaleEnabled: true
            resources:
              requests:
                cpu: 500m
                memory: 2048Mi
            cpu:
              targetAverageUtilization: 80
            podAnnotations:
              cluster-autoscaler.kubernetes.io/safe-to-evict: "true"
  destination:
    server: https://kubernetes.default.svc
    namespace: istio-system
EOF
4. Sync by ArgoCD
argocd app sync argocd/istiod
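A quick check that the control plane is up (the deployment name follows the default revision used above):

```shell
# istiod should finish rolling out in istio-system.
kubectl -n istio-system rollout status deployment/istiod

# The injection and validation webhooks should be registered.
kubectl get mutatingwebhookconfigurations,validatingwebhookconfigurations | grep istio
```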
5. Prepare `deploy-istio-ingressgateway.yaml`
kubectl -n argocd apply -f - << EOF
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
  name: istio-ingressgateway
spec:
  syncPolicy:
    syncOptions:
      - CreateNamespace=true
  project: default
  source:
    repoURL: https://istio-release.storage.googleapis.com/charts
    chart: gateway
    targetRevision: 1.23.2
    helm:
      releaseName: istio-ingressgateway
      values: |
        defaults:
          replicaCount: 1
          podAnnotations:
            inject.istio.io/templates: "gateway"
            sidecar.istio.io/inject: "true"
            cluster-autoscaler.kubernetes.io/safe-to-evict: "true"
          resources:
            requests:
              cpu: 100m
              memory: 128Mi
            limits:
              cpu: 2000m
              memory: 1024Mi
          service:
            type: LoadBalancer
            ports:
              - name: status-port
                port: 15021
                protocol: TCP
                targetPort: 15021
              - name: http2
                port: 80
                protocol: TCP
                targetPort: 80
              - name: https
                port: 443
                protocol: TCP
                targetPort: 443
          autoscaling:
            enabled: true
            minReplicas: 1
            maxReplicas: 5
  destination:
    server: https://kubernetes.default.svc
    namespace: istio-system
EOF
6. Sync by ArgoCD
argocd app sync argocd/istio-ingressgateway
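With the gateway synced, you can expose a workload through it. A minimal sketch, assuming the gateway pods carry the default `istio: ingressgateway` label and a hypothetical Service `my-service` exists in the `default` namespace on port 80:

```shell
# Check the external address assigned to the gateway Service first.
kubectl -n istio-system get svc istio-ingressgateway

# Hypothetical example: route my.example.com through the gateway to my-service.
kubectl apply -f - << EOF
apiVersion: networking.istio.io/v1
kind: Gateway
metadata:
  name: my-gateway
  namespace: istio-system
spec:
  selector:
    istio: ingressgateway
  servers:
    - port:
        number: 80
        name: http
        protocol: HTTP
      hosts:
        - "my.example.com"
---
apiVersion: networking.istio.io/v1
kind: VirtualService
metadata:
  name: my-virtualservice
  namespace: istio-system
spec:
  hosts:
    - "my.example.com"
  gateways:
    - my-gateway
  http:
    - route:
        - destination:
            host: my-service.default.svc.cluster.local
            port:
              number: 80
EOF
```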
Preliminary
1. Kubernetes has been installed; if not, check the link
2. Helm has been installed; if not, check the link
3. ArgoCD has been installed; if not, check the link
4. Argo Workflow has been installed; if not, check the link

1. Prepare `argocd-login-credentials`
kubectl get namespaces database > /dev/null 2>&1 || kubectl create namespace database
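The namespace check above only prepares a target namespace; the credentials themselves are a Secret the workflow reads when it logs in to ArgoCD. A minimal sketch, assuming the workflow expects a Secret named `argocd-login-credentials` in `business-workflows` with `username`/`password` keys, and that the ArgoCD admin password still sits in `argocd-initial-admin-secret`:

```shell
# Hypothetical example: copy the ArgoCD admin password into a Secret the workflow can mount.
kubectl -n business-workflows create secret generic argocd-login-credentials \
  --from-literal=username=admin \
  --from-literal=password="$(kubectl -n argocd get secret argocd-initial-admin-secret -o jsonpath='{.data.password}' | base64 -d)"
```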
2. Apply the RBAC to Kubernetes
kubectl apply -f - <<EOF
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: application-administrator
rules:
  - apiGroups:
      - argoproj.io
    resources:
      - applications
    verbs:
      - '*'
  - apiGroups:
      - apps
    resources:
      - deployments
    verbs:
      - '*'
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: application-administration
  namespace: argocd
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: application-administrator
subjects:
  - kind: ServiceAccount
    name: argo-workflow
    namespace: business-workflows
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: application-administration
  namespace: application
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: application-administrator
subjects:
  - kind: ServiceAccount
    name: argo-workflow
    namespace: business-workflows
EOF
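You can verify the bindings with an impersonation check:

```shell
# The workflow ServiceAccount should be allowed to manage Applications in argocd...
kubectl auth can-i create applications.argoproj.io -n argocd \
  --as system:serviceaccount:business-workflows:argo-workflow

# ...and Deployments in the application namespace.
kubectl auth can-i create deployments.apps -n application \
  --as system:serviceaccount:business-workflows:argo-workflow
```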
4. Prepare `deploy-xxxx-flow.yaml`
6. Submit to the Argo Workflow client
argo -n business-workflows submit deploy-xxxx-flow.yaml
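After submitting, you can follow the run with the Argo CLI (same namespace as the submit command):

```shell
# List recent workflows and tail the latest one.
argo -n business-workflows list
argo -n business-workflows logs @latest --follow
```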
7. Decode the password
kubectl -n application get secret xxxx-credentials -o jsonpath='{.data.xxx-password}' | base64 -d
FAQ
Install Nginx
1. Prepare `default.conf`
cat << EOF > default.conf
server {
    listen 80;
    location / {
        root /usr/share/nginx/html;
        autoindex on;
    }
}
EOF
2. Install
mkdir $(pwd)/data
podman run --rm -p 8080:80 \
-v $(pwd)/data:/usr/share/nginx/html:ro \
-v $(pwd)/default.conf:/etc/nginx/conf.d/default.conf:ro \
-d docker.io/library/nginx:1.19.9-alpine
echo 'this is a test' > $(pwd)/data/some-data.txt
Tip
You can run an additional DaoCloud image to accelerate image pulling; check DaoCloud Proxy.
Visit http://localhost:8080
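You can also check from the command line; the test file was created by the `echo` above:

```shell
# The autoindex listing should include some-data.txt.
curl http://localhost:8080/
curl http://localhost:8080/some-data.txt
```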