Subsections of 🛰️CSST Related
Publish Image
Subsections of Publish Image
Publish DFS Server Image
Preliminary
- argo workflows binary has been installed; if not, check link
1. create git-credential
GIT_USERNAME=boyang628
GIT_PASSWORD=yW9Yx__usgWy11aouzBB
kubectl -n business-workflows create secret generic git-credentials \
--from-literal="username=${GIT_USERNAME}" \
--from-literal="password=${GIT_PASSWORD}"
2. [Optional] create docker-login-credential
DOCKER_LOGIN_USERNAME=ascm-org-1705656754517
DOCKER_LOGIN_PASSWORD=4HRXwB5IoAQWUlhc
kubectl -n business-workflows create secret generic docker-login-credentials \
--from-literal="username=${DOCKER_LOGIN_USERNAME:-wangz2019}" \
--from-literal="password=${DOCKER_LOGIN_PASSWORD}"
3. prepare publish-dfs-image.yaml
apiVersion: argoproj.io/v1alpha1
kind: Workflow
metadata:
generateName: publish-dfs-image-
spec:
entrypoint: entry
serviceAccountName: argo-workflow
volumeClaimTemplates:
- metadata:
name: workspace
spec:
accessModes:
- ReadWriteOnce
storageClassName: nfs-external-nas
resources:
requests:
storage: 1Gi
templates:
- name: entry
dag:
tasks:
- name: dind
template: dind
- name: wait-for-dind
dependencies:
- dind
template: wait-for-dind
arguments:
parameters:
- name: dockerd-host
value: "{{tasks.dind.ip}}"
- name: publish
dependencies:
- wait-for-dind
template: publish
arguments:
parameters:
- name: dockerd-host
value: "{{tasks.dind.ip}}"
- name: dind
daemon: true
container:
image: m.daocloud.io/docker.io/library/docker:25.0.3-dind-alpine3.19
env:
- name: DOCKER_TLS_CERTDIR
value: ""
command:
- dockerd-entrypoint.sh
- --insecure-registry
- cr.registry.res.cloud.wuxi-yqgcy.cn
securityContext:
privileged: true
volumeMounts:
- name: workspace
mountPath: /workspace
- name: wait-for-dind
inputs:
parameters:
- name: dockerd-host
container:
image: m.daocloud.io/docker.io/library/docker:25.0.3-cli-alpine3.19
env:
- name: DOCKER_HOST
value: "{{inputs.parameters.dockerd-host}}"
command:
- sh
- -c
args:
- |
until docker ps; do sleep 3; done;
- name: publish
inputs:
artifacts:
- name: source
path: /workspace/src
git:
repo: https://inner-gitlab.citybrain.org/csst/csst-py.git
revision: main
usernameSecret:
name: git-credentials
key: username
passwordSecret:
name: git-credentials
key: password
parameters:
- name: dockerd-host
- name: registry-to-push
value: cr.registry.res.cloud.wuxi-yqgcy.cn
- name: image-to-publish
value: csst/dfs:v1-argo-ay-test
- name: registry
value: m.daocloud.io/docker.io
container:
image: m.daocloud.io/docker.io/library/docker:25.0.3-cli-alpine3.19
env:
- name: DOCKER_HOST
value: "{{inputs.parameters.dockerd-host}}"
- name: DOCKER_USERNAME
valueFrom:
secretKeyRef:
name: docker-login-credentials
key: username
- name: DOCKER_PASSWORD
valueFrom:
secretKeyRef:
name: docker-login-credentials
key: password
command:
- sh
- -c
args:
- |
set -e
export REGISTRY={{inputs.parameters.registry}}
export REGISTRY_TO_PUSH={{inputs.parameters.registry-to-push}}
export IMAGE_TO_PUBLISH=${REGISTRY_TO_PUSH}/{{inputs.parameters.image-to-publish}}
docker build \
--ulimit nofile=4096:4096 \
-f /workspace/src/Dockerfile \
--build-arg REGISTRY=${REGISTRY} \
-t ${IMAGE_TO_PUBLISH} /workspace/src \
&& docker login -u="${DOCKER_USERNAME}" -p="${DOCKER_PASSWORD}" ${REGISTRY_TO_PUSH} \
&& docker push ${IMAGE_TO_PUBLISH}
volumeMounts:
- name: workspace
mountPath: /workspace
4. submit to argo workflow
argo -n business-workflows submit publish-dfs-image.yaml
Import Data
Subsections of Import Data
Import CCDS MariaDB Data
Preliminary
- MariaDB has been installed through argo-workflow; if not, check link
- The MariaDB server pod is named app-mariadb and is in namespace application
Warning
if the pod name and namespace don't match, you might need to modify the following shell.
Download SQL file
wget https://inner-gitlab.citybrain.org/chang.liu/ccds-server/-/raw/ccds-v1/deploy/ccds-db-init.sql -O ccds-mariadb-init.sql
TODO: the downloaded content is an HTML page, not the SQL file — fix the URL or fetch it another way
Using import tool
MARIADB_ROOT_PASSWORD=$(kubectl -n application get secret mariadb-credentials -o jsonpath='{.data.mariadb-root-password}' | base64 -d) \
TOOL_POD_NAME=$(kubectl get pod -n application -l "app.kubernetes.io/name=mariadb-tool" -o jsonpath="{.items[0].metadata.name}") \
&& export SQL_FILENAME="ccds-mariadb-init.sql" \
&& kubectl -n application cp ${SQL_FILENAME} ${TOOL_POD_NAME}:/tmp/${SQL_FILENAME} \
&& kubectl -n application exec -it deployment/app-mariadb-tool -- bash -c \
'echo "create database ccds;" | mysql -h app-mariadb.application -uroot -p$MARIADB_ROOT_PASSWORD' \
&& kubectl -n application exec -it ${TOOL_POD_NAME} -- bash -c \
"mysql -h app-mariadb.application -uroot -p\${MARIADB_ROOT_PASSWORD} \
ccds < /tmp/ccds-mariadb-init.sql"
Build Clickhouse NFS Server
Preliminary
- Podman has been installed; if not, check link
1. create new partition
parted
#select /dev/vdb
#mklabel gpt
#mkpart primary 0 -1
#Cancel
#mkpart primary 0% 100%
#print
2. Format disk
mkfs.xfs /dev/vdb1 -f
3. mount disk to folder
mount /dev/vdb1 /data
4. mount after restart server
#vim `/etc/fstab`
/dev/vdb1 /data xfs defaults 0 0
5. init NFSv4 Server
echo -e "nfs\nnfsd" > /etc/modules-load.d/nfs4.conf
modprobe nfs && modprobe nfsd
mkdir -p $(pwd)/data/nfs/data
echo '/data *(rw,fsid=0,no_subtree_check,insecure,no_root_squash)' > $(pwd)/data/nfs/exports
podman run \
--name nfs4 \
--rm \
--privileged \
-p 12049:2049 \
-v $(pwd)/data/nfs/data:/data \
-v $(pwd)/data/nfs/exports:/etc/exports:ro \
-d docker.io/erichough/nfs-server:2.2.1
Tip
you can run an additional daocloud image to accelerate your pulling, check Daocloud Proxy
6. [Optional] test load
sudo yum install -y nfs-utils
sudo apt-get install nfs-common
sudo dnf install -y nfs-utils
the client is ok for a normal user
mkdir -p $(pwd)/mnt/nfs
sudo mount -t nfs4 -o port=2049 -v localhost:/ $(pwd)/mnt/nfs
df -h
7. create NFS provisioner
prepare csst-ck-nfs-provisioner.yaml
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: csst-ck-nfs-provisioner
spec:
syncPolicy:
syncOptions:
- CreateNamespace=true
project: default
source:
repoURL: https://kubernetes-sigs.github.io/nfs-subdir-external-provisioner
chart: nfs-subdir-external-provisioner
targetRevision: 4.0.18
helm:
releaseName: csst-ck-nfs-provisioner
values: |
image:
repository: m.daocloud.io/registry.k8s.io/sig-storage/nfs-subdir-external-provisioner
pullPolicy: IfNotPresent
nfs:
server: <$nfs.service.ip.addr>
path: /
mountOptions:
- port=12049
- vers=4
- minorversion=0
- rsize=1048576
- wsize=1048576
- hard
- timeo=600
- retrans=2
- noresvport
volumeName: csst-ck-nfs-subdir-external-provisioner-nas
reclaimPolicy: Retain
storageClass:
create: true
defaultClass: true
name: csst-nfs-external-nas
destination:
server: https://kubernetes.default.svc
namespace: basic-components
Your NFS server will be hosted on <$ip>:12049
8. apply to k8s
kubectl -n argocd apply -f csst-ck-nfs-provisioner.yaml
9. sync by argocd
argocd app sync argocd/csst-ck-nfs-provisioner
Then you can use storage class csst-nfs-external-nas to create a pv or pvc
Import CSST Clickhouse Data
Preliminary
- Clickhouse has been installed through argo-workflow; if not, check link
- The Clickhouse server pod is named app-clickhouse and is in namespace application
Warning
if the pod name and namespace don't match, you might need to modify the following shell.
Download Data file
Currently, we mount a NFS disk to retrieve data.
Using client tool
CK_HOST="172.27.253.66"
CK_PASSWORD=$(kubectl -n application get secret clickhouse-admin-credentials -o jsonpath='{.data.password}' | base64 -d) \
&& podman run --rm --entrypoint clickhouse-client -it m.daocloud.io/docker.io/clickhouse/clickhouse-server:23.11.5.29-alpine \
--host ${CK_HOST} \
--port 30900 \
--user admin \
--password ${CK_PASSWORD} \
--query "select version()"
Init Database
CREATE DATABASE IF NOT EXISTS csst ON CLUSTER default;
CREATE TABLE IF NOT EXISTS csst.msc_level2_catalog_local ON CLUSTER default
(
level2_id Int64,
OBSID String DEFAULT '',
CCDNO Int32,
objID Int32,
X Float32,
XErr Float64,
Y Float32,
YErr Float64,
RA Float64,
RAErr Float64,
DEC Float64,
DECErr Float64,
A Float32,
AErr Float32,
B Float32,
BErr Float32,
PA Float32,
Flag Int32,
Flag_ISO Int32,
Flag_ISO_Num Int32,
FWHM Float32,
AB Float32,
E Float32,
Flux_Kron Float64,
FluxErr_Kron Float32,
Mag_Kron Float64,
MagErr_Kron Float64,
Radius_Kron Float64,
Sky Float32,
Flux_Aper1 Float32,
FluxErr_Aper1 Float32,
Mag_Aper1 Float32,
MagErr_Aper1 Float32,
Flux_Aper2 Float32,
FluxErr_Aper2 Float32,
Mag_Aper2 Float32,
MagErr_Aper2 Float32,
Flux_Aper3 Float32,
FluxErr_Aper3 Float32,
Mag_Aper3 Float32,
MagErr_Aper3 Float32,
Flux_Aper4 Float32,
FluxErr_Aper4 Float32,
Mag_Aper4 Float32,
MagErr_Aper4 Float32,
Flux_Aper5 Float32,
FluxErr_Aper5 Float32,
Mag_Aper5 Float32,
MagErr_Aper5 Float32,
Flux_Aper6 Float32,
FluxErr_Aper6 Float32,
Mag_Aper6 Float32,
MagErr_Aper6 Float32,
Flux_Aper7 Float32,
FluxErr_Aper7 Float32,
Mag_Aper7 Float32,
MagErr_Aper7 Float32,
Flux_Aper8 Float32,
FluxErr_Aper8 Float32,
Mag_Aper8 Float32,
MagErr_Aper8 Float32,
Flux_Aper9 Float32,
FluxErr_Aper9 Float32,
Mag_Aper9 Float32,
MagErr_Aper9 Float32,
Flux_Aper10 Float32,
FluxErr_Aper10 Float32,
Mag_Aper10 Float32,
MagErr_Aper10 Float32,
Flux_Aper11 Float32,
FluxErr_Aper11 Float32,
Mag_Aper11 Float32,
MagErr_Aper11 Float32,
Flux_Aper12 Float32,
FluxErr_Aper12 Float32,
Mag_Aper12 Float32,
MagErr_Aper12 Float32,
Type Int32,
R20 Float32,
R50 Float32,
R90 Float32,
X_PSF Float64,
Y_PSF Float64,
RA_PSF Float64,
DEC_PSF Float64,
Chi2_PSF Float32,
Flux_PSF Float32,
FluxErr_PSF Float32,
Mag_PSF Float32,
MagErr_PSF Float32,
X_Model Float64,
Y_Model Float64,
RA_Model Float64,
DEC_Model Float64,
Chi2_Model Float32,
Flag_Model Int32,
Flux_Model Float32,
FluxErr_Model Float32,
Mag_Model Float32,
MagErr_Model Float32,
Flux_Bulge Float32,
FluxErr_Bulge Float32,
Mag_Bulge Float32,
MagErr_Bulge Float32,
Re_Bulge Float32,
ReErr_Bulge Float32,
E_Bulge Float32,
EErr_Bulge Float32,
PA_Bulge Float32,
PAErr_Bulge Float32,
Flux_Disk Float32,
FluxErr_Disk Float32,
Mag_Disk Float32,
MagErr_Disk Float32,
Re_Disk Float32,
ReErr_Disk Float32,
E_Disk Float32,
EErr_Disk Float32,
PA_Disk Float32,
PAErr_Disk Float32,
Ratio_Disk Float32,
RatioErr_Disk Float32,
Spread_Model Float32,
SpreadErr_Model Float32,
Filter String DEFAULT '',
Brick_Id Int32
)
ENGINE = ReplicatedMergeTree('/clickhouse/tables/{shard}/csst_msc_level2_catalog','{replica}')
PARTITION BY round(modulo(Brick_Id,64))
ORDER BY (level2_id,Brick_Id)
SETTINGS index_granularity = 8192;
CREATE TABLE IF NOT EXISTS csst.msc_level2_catalog_all ON CLUSTER default AS csst.msc_level2_catalog_local
ENGINE = Distributed(default, csst, msc_level2_catalog_local, rand());
CREATE DATABASE IF NOT EXISTS ephem ON CLUSTER default;
CREATE TABLE IF NOT EXISTS ephem.gaia3_source_local ON CLUSTER default
(
solution_id Int64,
designation String DEFAULT '',
source_id Int64,
random_index Int64,
ref_epoch Float64,
ra Float64,
ra_error Float64,
`dec` Float64,
dec_error Float64,
parallax Float64,
parallax_error Float64,
parallax_over_error Float64,
pm Float64,
pmra Float64,
pmra_error Float64,
pmdec Float64,
pmdec_error Float64,
ra_dec_corr Float64,
ra_parallax_corr Float64,
ra_pmra_corr Float64,
ra_pmdec_corr Float64,
dec_parallax_corr Float64,
dec_pmra_corr Float64,
dec_pmdec_corr Float64,
parallax_pmra_corr Float64,
parallax_pmdec_corr Float64,
pmra_pmdec_corr Float64,
astrometric_n_obs_al Int64,
astrometric_n_obs_ac Int64,
astrometric_n_good_obs_al Int64,
astrometric_n_bad_obs_al Int64,
astrometric_gof_al Float64,
astrometric_chi2_al Float64,
astrometric_excess_noise Float64,
astrometric_excess_noise_sig Float64,
astrometric_params_solved Int64,
astrometric_primary_flag UInt8,
nu_eff_used_in_astrometry Float64,
pseudocolour Float64,
pseudocolour_error Float64,
ra_pseudocolour_corr Float64,
dec_pseudocolour_corr Float64,
parallax_pseudocolour_corr Float64,
pmra_pseudocolour_corr Float64,
pmdec_pseudocolour_corr Float64,
astrometric_matched_transits Int64,
visibility_periods_used Int64,
astrometric_sigma5d_max Float64,
matched_transits Int64,
new_matched_transits Int64,
matched_transits_removed Int64,
ipd_gof_harmonic_amplitude Float64,
ipd_gof_harmonic_phase Float64,
ipd_frac_multi_peak Int64,
ipd_frac_odd_win Int64,
ruwe Float64,
scan_direction_strength_k1 Float64,
scan_direction_strength_k2 Float64,
scan_direction_strength_k3 Float64,
scan_direction_strength_k4 Float64,
scan_direction_mean_k1 Float64,
scan_direction_mean_k2 Float64,
scan_direction_mean_k3 Float64,
scan_direction_mean_k4 Float64,
duplicated_source UInt8,
phot_g_n_obs Int64,
phot_g_mean_flux Float64,
phot_g_mean_flux_error Float64,
phot_g_mean_flux_over_error Float64,
phot_g_mean_mag Float64,
phot_bp_n_obs Int64,
phot_bp_mean_flux Float64,
phot_bp_mean_flux_error Float64,
phot_bp_mean_flux_over_error Float64,
phot_bp_mean_mag Float64,
phot_rp_n_obs Int64,
phot_rp_mean_flux Float64,
phot_rp_mean_flux_error Float64,
phot_rp_mean_flux_over_error Float64,
phot_rp_mean_mag Float64,
phot_bp_rp_excess_factor Float64,
phot_bp_n_contaminated_transits Float64,
phot_bp_n_blended_transits Float64,
phot_rp_n_contaminated_transits Float64,
phot_rp_n_blended_transits Float64,
phot_proc_mode Float64,
bp_rp Float64,
bp_g Float64,
g_rp Float64,
radial_velocity Float64,
radial_velocity_error Float64,
rv_method_used Float64,
rv_nb_transits Float64,
rv_nb_deblended_transits Float64,
rv_visibility_periods_used Float64,
rv_expected_sig_to_noise Float64,
rv_renormalised_gof Float64,
rv_chisq_pvalue Float64,
rv_time_duration Float64,
rv_amplitude_robust Float64,
rv_template_teff Float64,
rv_template_logg Float64,
rv_template_fe_h Float64,
rv_atm_param_origin Float64,
vbroad Float64,
vbroad_error Float64,
vbroad_nb_transits Float64,
grvs_mag Float64,
grvs_mag_error Float64,
grvs_mag_nb_transits Float64,
rvs_spec_sig_to_noise Float64,
phot_variable_flag String DEFAULT '',
l Float64,
b Float64,
ecl_lon Float64,
ecl_lat Float64,
in_qso_candidates UInt8,
in_galaxy_candidates UInt8,
non_single_star Int64,
has_xp_continuous UInt8,
has_xp_sampled UInt8,
has_rvs UInt8,
has_epoch_photometry UInt8,
has_epoch_rv UInt8,
has_mcmc_gspphot UInt8,
has_mcmc_msc UInt8,
in_andromeda_survey UInt8,
classprob_dsc_combmod_quasar Float64,
classprob_dsc_combmod_galaxy Float64,
classprob_dsc_combmod_star Float64,
teff_gspphot Float64,
teff_gspphot_lower Float64,
teff_gspphot_upper Float64,
logg_gspphot Float64,
logg_gspphot_lower Float64,
logg_gspphot_upper Float64,
mh_gspphot Float64,
mh_gspphot_lower Float64,
mh_gspphot_upper Float64,
distance_gspphot Float64,
distance_gspphot_lower Float64,
distance_gspphot_upper Float64,
azero_gspphot Float64,
azero_gspphot_lower Float64,
azero_gspphot_upper Float64,
ag_gspphot Float64,
ag_gspphot_lower Float64,
ag_gspphot_upper Float64,
ebpminrp_gspphot Float64,
ebpminrp_gspphot_lower Float64,
ebpminrp_gspphot_upper Float64,
libname_gspphot String DEFAULT '',
NS8HIdx Int32,
NS16HIdx Int32,
NS32HIdx Int32,
NS64HIdx Int32,
fileIdx Int32
)
ENGINE = ReplicatedMergeTree('/clickhouse/tables/{shard}/gaia3','{replica}')
PARTITION BY round(modulo(NS8HIdx,20))
ORDER BY (NS8HIdx,NS16HIdx,NS32HIdx,NS64HIdx,fileIdx)
SETTINGS index_granularity = 8192;
CREATE TABLE IF NOT EXISTS ephem.gaia3_source_all ON CLUSTER default AS ephem.gaia3_source_local
ENGINE = Distributed(default, ephem, gaia3_source_local, rand());
Import Data
podman run --rm -v /tmp/deploy:/tmp/deploy -v /tmp/native:/share/diskdata/gaia3 \
--entrypoint clickhouse-client \
-it m.daocloud.io/docker.io/clickhouse/clickhouse-server:23.11.5.29-alpine \
--host ${CK_HOST} \
--port 30900 \
--user admin \
--password ${CK_PASSWORD} \
--query "insert into ephem.gaia3_source_all from infile /share/diskdata/gaia3/100751.native FORMAT Native"
1. first you need to run a container
podman run --rm -u root -v /data:/data:ro --entrypoint tail -it docker.io/bitnami/clickhouse:23.10.5-debian-11-r0 -f /etc/hosts
## get into pod
# podman exec -it <$container_id> bash
The directory /data holds all the *.native files
2. when you in pod, you need to create a shell script run.sh
#!/bin/sh
INDEX=0
for filename in $(ls -l /data | awk '{print $NF}');
do
INDEX=$(($INDEX+1))
echo $(date) $INDEX $filename >> import.log
clickhouse-client -h 172.27.253.66 --port=30900 --user admin --password YEkvhrhEaeZTf7E0 \
--query "insert into ephem.gaia3_source_local FORMAT Native" < /data/$filename \
|| echo $filename >> import_err.log
done
Then you can use sh run.sh to import data into clickhouse and view import_err.log to trace the error.
Import CSST Postgres Data
Preliminary
- Postgresql has been installed through argo-workflow; if not, check link
- The Postgresql server pod is named app-postgresql and is in namespace application
Warning
if the pod name and namespace don't match, you might need to modify the following shell.
Download SQL file
wget https://inner-gitlab.citybrain.org/csst/csst-py/-/raw/main/deploy/pg/init_dfs_table_data.sql -O init_dfs_table_data.sql
TODO: the downloaded content is an HTML page, not the SQL file — fix the URL or fetch it another way
Using import tool
POSTGRES_PASSWORD=$(kubectl -n application get secret postgresql-credentials -o jsonpath='{.data.postgres-password}' | base64 -d) \
POD_NAME=$(kubectl get pod -n application -l "app.kubernetes.io/name=postgresql-tool" -o jsonpath="{.items[0].metadata.name}") \
&& export SQL_FILENAME="init_dfs_table_data.sql" \
&& kubectl -n application cp ${SQL_FILENAME} ${POD_NAME}:/tmp/${SQL_FILENAME} \
&& kubectl -n application exec -it deployment/app-postgresql-tool -- bash -c \
'echo "CREATE DATABASE csst;" | PGPASSWORD="$POSTGRES_PASSWORD" \
psql --host app-postgresql.application -U postgres -d postgres -p 5432' \
&& kubectl -n application exec -it deployment/app-postgresql-tool -- bash -c \
'PGPASSWORD="$POSTGRES_PASSWORD" psql --host app-postgresql.application \
-U postgres -d csst -p 5432 < /tmp/init_dfs_table_data.sql'
Deploy App
Subsections of Deploy App
Deploy CCDS Server
Preliminary
- MariaDB has been installed through argo-workflow; if not, check link
- Redis has been installed through argo-workflow; if not, check link
- And init mariadb has finished; if not, check link
- The nfs-external-nas NAS server has been initialized somewhere; if not, check link
Steps
1. decode mariadb password
kubectl -n application get secret mariadb-credentials -o jsonpath='{.data.mariadb-root-password}' | base64 -d
2. prepare combo-data-pvc.yaml
---
apiVersion: "v1"
kind: "PersistentVolumeClaim"
metadata:
name: "ccds-data-pvc"
namespace: "application"
spec:
accessModes:
- "ReadWriteMany"
resources:
requests:
storage: "200Gi"
storageClassName: "nfs-external-nas"
status:
accessModes:
- "ReadWriteMany"
capacity:
storage: "200Gi"
---
apiVersion: "v1"
kind: "PersistentVolumeClaim"
metadata:
name: "csst-data-pvc"
namespace: "application"
spec:
accessModes:
- "ReadWriteMany"
resources:
requests:
storage: "200Gi"
storageClassName: "nfs-external-nas"
status:
accessModes:
- "ReadWriteMany"
capacity:
storage: "200Gi"
3. prepare deploy-ccds-server.yaml
apiVersion: argoproj.io/v1alpha1
kind: Workflow
metadata:
generateName: deploy-ccds-
spec:
entrypoint: entry
artifactRepositoryRef:
configmap: artifact-repositories
key: default-artifact-repository
serviceAccountName: argo-workflow
templates:
- name: entry
inputs:
parameters:
- name: argocd-server
value: argo-cd-argocd-server.argocd:443
- name: insecure-option
value: --insecure
dag:
tasks:
- name: apply
template: apply
- name: prepare-argocd-binary
template: prepare-argocd-binary
dependencies:
- apply
- name: sync
dependencies:
- prepare-argocd-binary
template: sync
arguments:
artifacts:
- name: argocd-binary
from: "{{tasks.prepare-argocd-binary.outputs.artifacts.argocd-binary}}"
parameters:
- name: argocd-server
value: "{{inputs.parameters.argocd-server}}"
- name: insecure-option
value: "{{inputs.parameters.insecure-option}}"
- name: wait
dependencies:
- sync
template: wait
arguments:
artifacts:
- name: argocd-binary
from: "{{tasks.prepare-argocd-binary.outputs.artifacts.argocd-binary}}"
parameters:
- name: argocd-server
value: "{{inputs.parameters.argocd-server}}"
- name: insecure-option
value: "{{inputs.parameters.insecure-option}}"
- name: apply
resource:
action: apply
manifest: |
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: ccds-server
namespace: argocd
spec:
syncPolicy:
syncOptions:
- CreateNamespace=true
project: default
source:
repoURL: https://charts.bitnami.com/bitnami
chart: nginx
targetRevision: 15.10.4
helm:
releaseName: ccds-server
values: |
image:
registry: cr.registry.res.cloud.wuxi-yqgcy.cn
repository: csst/ccds
tag: V1-argo-test
pullPolicy: IfNotPresent
extraEnvVars:
- name: TZ
value: Asia/Shanghai
- name: FLASK_DEBUG
value: "0"
- name: FLASK_ENV
value: "production"
- name: DATABASE_URL
value: "mysql://root:IqzfDQfjkzfNhsCS@app-mariadb.application:3306/ccds?charset=utf8"
- name: REDIS_HOST
value: "app-redis-master.application"
- name: REDIS_PWD
value: "THY7BxnEIOeecarE"
- name: REDIS_PORT
value: "6379"
- name: CSST_DFS_API_MODE
value: "cluster"
- name: CSST_DFS_GATEWAY
value: "csst-gateway.csst:80"
- name: CSST_DFS_APP_ID
value: "test"
- name: CSST_DFS_APP_TOKEN
value: "test"
containerSecurityContext:
enabled: false
replicaCount: 1
containerPorts:
http: 9000
extraVolumes:
- name: csst-data-pvc
persistentVolumeClaim:
claimName: csst-data-pvc
- name: ccds-data-pvc
persistentVolumeClaim:
claimName: ccds-data-pvc
extraVolumeMounts:
- mountPath: /csst-data
name: csst-data-pvc
- mountPath: /ccds-data
name: ccds-data-pvc
service:
type: ClusterIP
ports:
http: 9000
targetPort:
http: 9000
destination:
server: https://kubernetes.default.svc
namespace: application
- name: prepare-argocd-binary
inputs:
artifacts:
- name: argocd-binary
path: /tmp/argocd
mode: 755
http:
url: https://files.m.daocloud.io/github.com/argoproj/argo-cd/releases/download/v2.9.3/argocd-linux-amd64
outputs:
artifacts:
- name: argocd-binary
path: "{{inputs.artifacts.argocd-binary.path}}"
container:
image: m.daocloud.io/docker.io/library/fedora:39
command:
- sh
- -c
args:
- |
ls -l {{inputs.artifacts.argocd-binary.path}}
- name: sync
inputs:
artifacts:
- name: argocd-binary
path: /usr/local/bin/argocd
parameters:
- name: argocd-server
- name: insecure-option
value: ""
container:
image: m.daocloud.io/docker.io/library/fedora:39
env:
- name: ARGOCD_USERNAME
valueFrom:
secretKeyRef:
name: argocd-login-credentials
key: username
- name: ARGOCD_PASSWORD
valueFrom:
secretKeyRef:
name: argocd-login-credentials
key: password
- name: WITH_PRUNE_OPTION
value: --prune
command:
- sh
- -c
args:
- |
set -e
export ARGOCD_SERVER={{inputs.parameters.argocd-server}}
export INSECURE_OPTION={{inputs.parameters.insecure-option}}
export ARGOCD_USERNAME=${ARGOCD_USERNAME:-admin}
argocd login ${INSECURE_OPTION} --username ${ARGOCD_USERNAME} --password ${ARGOCD_PASSWORD} ${ARGOCD_SERVER}
argocd app sync argocd/ccds-server ${WITH_PRUNE_OPTION} --timeout 300
- name: wait
inputs:
artifacts:
- name: argocd-binary
path: /usr/local/bin/argocd
parameters:
- name: argocd-server
- name: insecure-option
value: ""
container:
image: m.daocloud.io/docker.io/library/fedora:39
env:
- name: ARGOCD_USERNAME
valueFrom:
secretKeyRef:
name: argocd-login-credentials
key: username
- name: ARGOCD_PASSWORD
valueFrom:
secretKeyRef:
name: argocd-login-credentials
key: password
command:
- sh
- -c
args:
- |
set -e
export ARGOCD_SERVER={{inputs.parameters.argocd-server}}
export INSECURE_OPTION={{inputs.parameters.insecure-option}}
export ARGOCD_USERNAME=${ARGOCD_USERNAME:-admin}
argocd login ${INSECURE_OPTION} --username ${ARGOCD_USERNAME} --password ${ARGOCD_PASSWORD} ${ARGOCD_SERVER}
argocd app wait argocd/ccds-server
4. create pvc resource
kubectl -n application apply -f combo-data-pvc.yaml
5. submit to argo workflow client
argo -n business-workflows submit deploy-ccds-server.yaml
Deploy DFS Server
Preliminary
- MariaDB has been installed through argo-workflow; if not, check link
- Postgresql has been installed through argo-workflow; if not, check link
- And init mariadb has finished; if not, check link
- And init postgresql has finished; if not, check link
Steps
0. prepare csst.yaml
global:
fitsFileRootDir: /opt/temp/csst/fits_file
fileExternalPrefix: http://csst.astrolab.cn/file
tempDir: /tmp
etcd:
host: 0.0.0.0
port: 2379
redisChannel0:
host: app-redis-master.application
port: 6379
db_id: 0
channel0: channel0
passwd:
level0_list: single-image-reduction:data0
csst_db:
enabled: true
host: 172.27.253.66
port: 30173
user: "postgres"
passwd: "woQ8btfS44ei1Bbx"
db: "csst"
maxIdleConnection: 100
maxOpenConnection: 130
connMaxLifetime: 100
csst_db_seq:
enabled: true
host: 172.27.253.66
port: 30173
user: "postgres"
passwd: "woQ8btfS44ei1Bbx"
db: "csst"
maxIdleConnection: 100
maxOpenConnection: 130
connMaxLifetime: 100
csst_ck:
enabled: true
url: tcp://app-clickhouse-service-external.application:30900?compress=true
host: app-clickhouse-service-external
clusters: app-clickhouse-service-external:30900
port: 30900
db: csst
user: admin
passwd: "YEkvhrhEaeZTf7E0"
gateway:
enabled: true
url: csst-gateway.application:31280
ephem_ck_db:
enabled: true
url: tcp://app-clickhouse-service-external.application:30900?compress=true
host: app-clickhouse-service-external
port: 30900
db: ephem
user: admin
passwd: "YEkvhrhEaeZTf7E0"
maxIdleConnection: 100
maxOpenConnection: 130
connMaxLifetime: 100
csst_doris_db:
enabled: true
host: app-mariadb
port: 3306
user: "root"
passwd: "IqzfDQfjkzfNhsCS"
db: "ccds"
maxIdleConnection: 100
maxOpenConnection: 130
connMaxLifetime: 100
redis:
enabled: true
conn: app-redis-master.application:6379
dbNum: 8
password:
timeout: 3000
sentinel:
master: csstMaster
nodes: app-redis-master.application:6379
jwt:
secretKey: W6VjDud2W1kMG3BicbMNlGgI4ZfcoHtMGLWr
auth_srv:
name: net.cnlab.csst.srv.auth.
address:
port: 9030
zap:
level: error
development: true
logFileDir:
outputPaths: []
maxSize: 50
maxBackups: 200
maxAge: 10
dfs_srv:
name: net.cnlab.csst.srv.dfs-srv.
address:
port: 9100
ephem_srv:
name: net.cnlab.csst.srv.ephem.
address:
port: 9060
ephem_rest:
name: net.cnlab.csst.srv.ephem-rest.
address:
port: 9068
user_srv:
name: net.cnlab.csst.srv.user.
address:
port: 9090
fits_srv:
name: net.cnlab.csst.srv.fits.
address:
port: 9002
1. create csst-credentials
kubectl -n application create secret generic csst-credentials --from-file=./csst.yaml
2. [Optional] prepare csst-data-pvc.yaml
apiVersion: "v1"
kind: "PersistentVolumeClaim"
metadata:
name: "csst-data-pvc"
namespace: "application"
spec:
accessModes:
- "ReadWriteMany"
resources:
requests:
storage: "200Gi"
storageClassName: "nfs-external-nas"
status:
accessModes:
- "ReadWriteMany"
capacity:
storage: "200Gi"
3. prepare deploy-dfs-server.yaml
apiVersion: argoproj.io/v1alpha1
kind: Workflow
metadata:
generateName: deploy-dfs-server-
spec:
entrypoint: entry
artifactRepositoryRef:
configmap: artifact-repositories
key: default-artifact-repository
serviceAccountName: argo-workflow
templates:
- name: entry
inputs:
parameters:
- name: argocd-server
value: argo-cd-argocd-server.argocd:443
- name: insecure-option
value: --insecure
dag:
tasks:
- name: apply
template: apply
- name: prepare-argocd-binary
template: prepare-argocd-binary
dependencies:
- apply
- name: sync
dependencies:
- prepare-argocd-binary
template: sync
arguments:
artifacts:
- name: argocd-binary
from: "{{tasks.prepare-argocd-binary.outputs.artifacts.argocd-binary}}"
parameters:
- name: argocd-server
value: "{{inputs.parameters.argocd-server}}"
- name: insecure-option
value: "{{inputs.parameters.insecure-option}}"
- name: wait
dependencies:
- sync
template: wait
arguments:
artifacts:
- name: argocd-binary
from: "{{tasks.prepare-argocd-binary.outputs.artifacts.argocd-binary}}"
parameters:
- name: argocd-server
value: "{{inputs.parameters.argocd-server}}"
- name: insecure-option
value: "{{inputs.parameters.insecure-option}}"
- name: apply
resource:
action: apply
manifest: |
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: dfs-server
namespace: argocd
spec:
syncPolicy:
syncOptions:
- CreateNamespace=true
project: default
source:
repoURL: https://charts.bitnami.com/bitnami
chart: nginx
targetRevision: 15.10.4
helm:
releaseName: dfs-server
values: |
image:
registry: cr.registry.res.cloud.wuxi-yqgcy.cn
repository: mirror/dfs-server
tag: v240306-r1
pullPolicy: IfNotPresent
extraEnvVars:
- name: ZONEINFO
value: /opt/zoneinfo.zip
- name: TZ
value: Asia/Shanghai
- name: CONFIG_FILE_PATH
value: /app/csst.yaml
- name: PYTHONPATH
value: /work/csst-py/:/work/csst-py/dfs-srv:/work/packages:/work/csst-dfs-proto-py:/work/csst-dfs-commons:/work/csst-dfs-base
containerSecurityContext:
enabled: false
containerPorts:
http: 9100
extraVolumes:
- name: csst-data-pvc
persistentVolumeClaim:
claimName: csst-data-pvc
- name: dfs-csst-config
secret:
secretName: csst-credentials
extraVolumeMounts:
- mountPath: /share/dfs
name: csst-data-pvc
- mountPath: /app
name: dfs-csst-config
service:
type: ClusterIP
ports:
http: 9100
targetPort:
http: http
ingress:
enabled: false
destination:
server: https://kubernetes.default.svc
namespace: application
- name: prepare-argocd-binary
inputs:
artifacts:
- name: argocd-binary
path: /tmp/argocd
mode: 755
http:
url: https://files.m.daocloud.io/github.com/argoproj/argo-cd/releases/download/v2.9.3/argocd-linux-amd64
outputs:
artifacts:
- name: argocd-binary
path: "{{inputs.artifacts.argocd-binary.path}}"
container:
image: m.daocloud.io/docker.io/library/fedora:39
command:
- sh
- -c
args:
- |
ls -l {{inputs.artifacts.argocd-binary.path}}
- name: sync
inputs:
artifacts:
- name: argocd-binary
path: /usr/local/bin/argocd
parameters:
- name: argocd-server
- name: insecure-option
value: ""
container:
image: m.daocloud.io/docker.io/library/fedora:39
env:
- name: ARGOCD_USERNAME
valueFrom:
secretKeyRef:
name: argocd-login-credentials
key: username
- name: ARGOCD_PASSWORD
valueFrom:
secretKeyRef:
name: argocd-login-credentials
key: password
- name: WITH_PRUNE_OPTION
value: --prune
command:
- sh
- -c
args:
- |
set -e
export ARGOCD_SERVER={{inputs.parameters.argocd-server}}
export INSECURE_OPTION={{inputs.parameters.insecure-option}}
export ARGOCD_USERNAME=${ARGOCD_USERNAME:-admin}
argocd login ${INSECURE_OPTION} --username ${ARGOCD_USERNAME} --password ${ARGOCD_PASSWORD} ${ARGOCD_SERVER}
argocd app sync argocd/dfs-server ${WITH_PRUNE_OPTION} --timeout 300
- name: wait
inputs:
artifacts:
- name: argocd-binary
path: /usr/local/bin/argocd
parameters:
- name: argocd-server
- name: insecure-option
value: ""
container:
image: m.daocloud.io/docker.io/library/fedora:39
env:
- name: ARGOCD_USERNAME
valueFrom:
secretKeyRef:
name: argocd-login-credentials
key: username
- name: ARGOCD_PASSWORD
valueFrom:
secretKeyRef:
name: argocd-login-credentials
key: password
command:
- sh
- -c
args:
- |
set -e
export ARGOCD_SERVER={{inputs.parameters.argocd-server}}
export INSECURE_OPTION={{inputs.parameters.insecure-option}}
export ARGOCD_USERNAME=${ARGOCD_USERNAME:-admin}
argocd login ${INSECURE_OPTION} --username ${ARGOCD_USERNAME} --password ${ARGOCD_PASSWORD} ${ARGOCD_SERVER}
argocd app wait argocd/dfs-server
4. [Optional] create pvc resource
kubectl -n application apply -f csst-data-pvc.yaml
5. submit to argo workflow client
argo -n business-workflows submit deploy-dfs-server.yaml
Deploy DFS Ephem
Preliminary
- MariaDB has installed though argo-workflow, if not check link
- Postgresql has installed though argo-workflow, if not check link
- And init mariadb has finished, if not check link
- And init postgresql has finished, if not check link
Steps
1. [Optional] create csst-credentials
kubectl -n business-workflows create secret generic csst-credentials --from-file=./csst.yaml
3. prepare deploy-dfs-ephem.yaml
# Argo Workflow: registers the Argo CD Application "dfs-ephem" (the dfs-server
# image run as the ephem service via the Bitnami nginx chart), syncs it, and
# waits for it to become healthy.
# DAG: apply -> prepare-argocd-binary -> sync -> wait
apiVersion: argoproj.io/v1alpha1
kind: Workflow
metadata:
  generateName: deploy-dfs-ephem-
spec:
  entrypoint: entry
  artifactRepositoryRef:
    configmap: artifact-repositories
    key: default-artifact-repository
  serviceAccountName: argo-workflow
  templates:
    # entry: wires the four steps together and forwards the Argo CD endpoint
    # parameters to the sync/wait steps.
    - name: entry
      inputs:
        parameters:
          - name: argocd-server
            value: argo-cd-argocd-server.argocd:443
          - name: insecure-option
            value: --insecure
      dag:
        tasks:
          - name: apply
            template: apply
          - name: prepare-argocd-binary
            template: prepare-argocd-binary
            dependencies:
              - apply
          - name: sync
            dependencies:
              - prepare-argocd-binary
            template: sync
            arguments:
              artifacts:
                - name: argocd-binary
                  from: "{{tasks.prepare-argocd-binary.outputs.artifacts.argocd-binary}}"
              parameters:
                - name: argocd-server
                  value: "{{inputs.parameters.argocd-server}}"
                - name: insecure-option
                  value: "{{inputs.parameters.insecure-option}}"
          - name: wait
            dependencies:
              - sync
            template: wait
            arguments:
              artifacts:
                - name: argocd-binary
                  from: "{{tasks.prepare-argocd-binary.outputs.artifacts.argocd-binary}}"
              parameters:
                - name: argocd-server
                  value: "{{inputs.parameters.argocd-server}}"
                - name: insecure-option
                  value: "{{inputs.parameters.insecure-option}}"
    # apply: server-side applies the Argo CD Application manifest for dfs-ephem.
    - name: apply
      resource:
        action: apply
        manifest: |
          apiVersion: argoproj.io/v1alpha1
          kind: Application
          metadata:
            name: dfs-ephem
            namespace: argocd
          spec:
            syncPolicy:
              syncOptions:
                - CreateNamespace=true
            project: default
            source:
              repoURL: https://charts.bitnami.com/bitnami
              chart: nginx
              targetRevision: 15.10.4
              helm:
                releaseName: dfs-ephem
                values: |
                  image:
                    registry: cr.registry.res.cloud.wuxi-yqgcy.cn
                    repository: mirror/dfs-server
                    tag: v240306-r1
                    pullPolicy: IfNotPresent
                  command:
                    - python
                  args:
                    - /work/csst-py/ephem-srv/csst_ephem_srv/server.py
                  extraEnvVars:
                    - name: ZONEINFO
                      value: /opt/zoneinfo.zip
                    - name: TZ
                      value: Asia/Shanghai
                    - name: CONFIG_FILE_PATH
                      value: /app/csst.yaml
                    - name: CONFIG_SERVER
                      value: "cdfs-config.csst:9610"
                    - name: PYTHONPATH
                      value: /work/csst-py/ephem-srv:/work/csst-py/:/work/csst-py/dfs-srv:/work/packages:/work/csst-dfs-proto-py:/work/csst-dfs-commons:/work/csst-dfs-base
                  containerSecurityContext:
                    enabled: false
                  replicaCount: 1
                  containerPorts:
                    http: 9060
                  service:
                    type: ClusterIP
                    ports:
                      http: 9060
                    targetPort:
                      http: http
                  ingress:
                    enabled: false
                  extraVolumes:
                    - name: dfs-csst-config
                      secret:
                        secretName: csst-credentials
                  extraVolumeMounts:
                    - mountPath: /app
                      name: dfs-csst-config
            destination:
              server: https://kubernetes.default.svc
              namespace: application
    # prepare-argocd-binary: downloads the argocd CLI via an HTTP input
    # artifact and republishes it as an output artifact for sync/wait.
    - name: prepare-argocd-binary
      inputs:
        artifacts:
          - name: argocd-binary
            path: /tmp/argocd
            mode: 755
            http:
              url: https://files.m.daocloud.io/github.com/argoproj/argo-cd/releases/download/v2.9.3/argocd-linux-amd64
      outputs:
        artifacts:
          - name: argocd-binary
            path: "{{inputs.artifacts.argocd-binary.path}}"
      container:
        image: m.daocloud.io/docker.io/library/fedora:39
        command:
          - sh
          - -c
        args:
          - |
            ls -l {{inputs.artifacts.argocd-binary.path}}
    # sync: logs in with the argocd-login-credentials secret and syncs the
    # dfs-ephem Application, pruning resources removed from the desired state.
    - name: sync
      inputs:
        artifacts:
          - name: argocd-binary
            path: /usr/local/bin/argocd
        parameters:
          - name: argocd-server
          - name: insecure-option
            value: ""
      container:
        image: m.daocloud.io/docker.io/library/fedora:39
        env:
          - name: ARGOCD_USERNAME
            valueFrom:
              secretKeyRef:
                name: argocd-login-credentials
                key: username
          - name: ARGOCD_PASSWORD
            valueFrom:
              secretKeyRef:
                name: argocd-login-credentials
                key: password
          - name: WITH_PRUNE_OPTION
            value: --prune
        command:
          - sh
          - -c
        args:
          - |
            set -e
            export ARGOCD_SERVER={{inputs.parameters.argocd-server}}
            export INSECURE_OPTION={{inputs.parameters.insecure-option}}
            export ARGOCD_USERNAME=${ARGOCD_USERNAME:-admin}
            argocd login ${INSECURE_OPTION} --username ${ARGOCD_USERNAME} --password ${ARGOCD_PASSWORD} ${ARGOCD_SERVER}
            argocd app sync argocd/dfs-ephem ${WITH_PRUNE_OPTION} --timeout 300
    # wait: blocks until the dfs-ephem Application reports synced and healthy.
    - name: wait
      inputs:
        artifacts:
          - name: argocd-binary
            path: /usr/local/bin/argocd
        parameters:
          - name: argocd-server
          - name: insecure-option
            value: ""
      container:
        image: m.daocloud.io/docker.io/library/fedora:39
        env:
          - name: ARGOCD_USERNAME
            valueFrom:
              secretKeyRef:
                name: argocd-login-credentials
                key: username
          - name: ARGOCD_PASSWORD
            valueFrom:
              secretKeyRef:
                name: argocd-login-credentials
                key: password
        command:
          - sh
          - -c
        args:
          - |
            set -e
            export ARGOCD_SERVER={{inputs.parameters.argocd-server}}
            export INSECURE_OPTION={{inputs.parameters.insecure-option}}
            export ARGOCD_USERNAME=${ARGOCD_USERNAME:-admin}
            argocd login ${INSECURE_OPTION} --username ${ARGOCD_USERNAME} --password ${ARGOCD_PASSWORD} ${ARGOCD_SERVER}
            argocd app wait argocd/dfs-ephem
3. submit to argo workflow client
argo -n business-workflows submit deploy-dfs-ephem.yaml
Deploy CSST Gateway
Preliminary
- DFS server has been installed through argo-workflow; if not, check link
- DFS ephem has been installed through argo-workflow; if not, check link
Warning
if the DFS server, DFS ephem and namespace don't match, you might need to modify the following shell.
Steps
1. prepare csst-gateway.configmap.yaml
# nginx.conf for the csst-gateway pod (stored in csst-gateway-configmap and
# projected as /etc/nginx/nginx.conf).
# - port 80: HTTP/2 gRPC reverse proxy in front of DFS server / DFS ephem
# - port 81: plain-HTTP search proxy plus autoindex browsing of /share/dfs
worker_processes auto;
events {
    worker_connections 1024;
}
http {
    log_format main '$remote_addr - $remote_user [$time_local] "$request" $status $body_bytes_sent "$http_referer" "$http_user_agent" "$http_x_forwarded_for"';
    default_type application/octet-stream;
    sendfile on;
    tcp_nopush on;
    tcp_nodelay on;
    client_max_body_size 10000m;
    types_hash_max_size 2048;
    underscores_in_headers on;
    reset_timedout_connection on;
    keepalive_timeout 960;
    client_header_timeout 960;
    client_body_timeout 960;
    proxy_connect_timeout 960;
    proxy_read_timeout 960;
    proxy_send_timeout 960;
    send_timeout 960;
    # gRPC upstreams (Services in the "application" namespace).
    upstream grpc_dfs {
        server dfs-server-nginx.application:9100 weight=1;
    }
    upstream grpc_ephem {
        server dfs-ephem-nginx.application:9060 weight=1;
    }
    server {
        listen 80 http2;
        # Generic DFS gRPC methods (package prefix "/dfs.").
        location ^~ /dfs. {
            grpc_pass_header Host;
            grpc_pass_header X-Real-IP;
            grpc_set_header Host $host;
            grpc_set_header X-Real-IP $remote_addr;
            grpc_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            grpc_socket_keepalive on;
            grpc_read_timeout 960;
            grpc_send_timeout 960;
            proxy_read_timeout 960;
            proxy_send_timeout 960;
            grpc_pass grpc://grpc_dfs;
        }
        # Ephem gRPC methods; the longer "^~" prefix wins over "/dfs." above.
        location ^~ /dfs.ephem. {
            grpc_pass_header Host;
            grpc_pass_header X-Real-IP;
            grpc_set_header Host $host;
            grpc_set_header X-Real-IP $remote_addr;
            grpc_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            grpc_socket_keepalive on;
            grpc_read_timeout 960;
            grpc_send_timeout 960;
            proxy_read_timeout 960;
            proxy_send_timeout 960;
            grpc_pass grpc://grpc_ephem;
        }
    }
    server {
        listen 81;
        location /search/v2 {
            # NOTE(review): 0.0.0.0 as a proxy_pass destination targets the
            # local host on port 9068 — confirm a sidecar/local service
            # actually listens there.
            proxy_pass http://0.0.0.0:9068/search;
            proxy_pass_request_headers on;
            proxy_set_header Host $host;
            proxy_set_header X-Real-IP $remote_addr;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            proxy_set_header X-Forwarded-Host $server_name;
        }
        # Static browsing of the shared DFS data volume.
        location / {
            root /share/dfs;
            autoindex on;
        }
    }
}
2. [Optional] prepare csst-data-pvc.yaml
# RWX PVC for the shared DFS data (served at /share/dfs by csst-gateway and
# mounted as /dfsroot by the L1 pipeline pods).
apiVersion: "v1"
kind: "PersistentVolumeClaim"
metadata:
  name: "csst-data-pvc"
  namespace: "application"
spec:
  accessModes:
    - "ReadWriteMany"
  resources:
    requests:
      storage: "200Gi"
  storageClassName: "nfs-external-nas"
# NOTE(review): "status" is populated by the API server and ignored on apply;
# it can be dropped from this manifest.
status:
  accessModes:
    - "ReadWriteMany"
  capacity:
    storage: "200Gi"
3. prepare deploy-csst-gateway.yaml
---
# NodePort Service exposing the gateway: gRPC on 80 (node port 31280) and the
# search/static listener on 81 (node port 31281).
apiVersion: v1
kind: Service
metadata:
  labels:
    app.kubernetes.io/name: csst-gateway
  name: csst-gateway
  namespace: application
spec:
  ports:
    - name: http
      port: 80
      nodePort: 31280
      protocol: TCP
      targetPort: 80
    - name: search
      port: 81
      nodePort: 31281
      targetPort: 81
  selector:
    app.kubernetes.io/name: csst-gateway
  type: NodePort
---
# Single-replica nginx Deployment running the gateway config projected from
# csst-gateway-configmap (key csst-gateway.configmap.yaml -> nginx.conf).
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app.kubernetes.io/name: csst-gateway
  name: csst-gateway
  namespace: application
spec:
  replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/name: csst-gateway
  template:
    metadata:
      labels:
        app.kubernetes.io/name: csst-gateway
    spec:
      containers:
        - env:
            - name: TZ
              value: Asia/Shanghai
          image: docker.io/library/nginx:1.19.9-alpine
          imagePullPolicy: IfNotPresent
          name: csst-gateway
          ports:
            - containerPort: 80
              name: http
            - containerPort: 81
              name: search
          volumeMounts:
            # NOTE(review): mounting over /etc/nginx shadows the image's stock
            # files (mime.types etc.); only nginx.conf is projected here —
            # confirm nothing else from /etc/nginx is needed.
            - mountPath: /etc/nginx
              name: csst-gateway-config
            - mountPath: /share/dfs
              name: csst-data-pvc
      volumes:
        - name: csst-gateway-config
          configMap:
            name: csst-gateway-configmap
            items:
              - key: csst-gateway.configmap.yaml
                path: nginx.conf
        - name: csst-data-pvc
          persistentVolumeClaim:
            claimName: csst-data-pvc
      restartPolicy: Always
4. create configMap based on csst-gateway.configmap.yaml
kubectl -n application create configmap csst-gateway-configmap --from-file=csst-gateway.configmap.yaml -o yaml --dry-run=client | kubectl -n application apply -f -
5. [Optional] create pvc resource
kubectl -n application apply -f csst-data-pvc.yaml
6. apply to k8s
kubectl -n application apply -f deploy-csst-gateway.yaml
Mbi L1 Job
Subsections of Mbi L1 Job
Prepare L1 Fixed Data
Preliminary
- ossutil has been installed.
- PVC csst-data-pvc has been initialized; if not, check link
- PVC ccds-data-pvc has been initialized; if not, check link
- PVC csst-msc-l1-mbi-aux-pvc has been initialized; if not, check link
copy data from OSS
ossutil cp -r oss://csst-data/CSST-20240312/dfs/ /data/nfs/data/application-csst-data-pvc-pvc-42f5745d-8379-462e-ba5b-3034e178eb7a
ossutil cp -r oss://csst-data/CSST-20240312/crdsdata/data /data/nfs/data/application-ccds-data-pvc-pvc-d773d4f7-1391-4bee-9711-df265db405fd
ossutil cp -r oss://csst-data/CSST-20240312/pipeline.tar.gz /data/nfs/data/application-csst-msc-l1-mbi-aux-pvc-pvc-e328eb62-d3ff-4908-b504-0413b4ea7e99/
And then you need to unzip the /pipeline/aux dir in the tar file
tar -xvf /data/nfs/data/application-csst-msc-l1-mbi-aux-pvc-pvc-e328eb62-d3ff-4908-b504-0413b4ea7e99/pipeline.tar.gz pipeline/aux
Run L1 Job
Preliminary
1. prepare csst-msc-l1-mbi-aux-pvc.yaml
# RWX PVC holding the unpacked pipeline auxiliary data (mounted at
# /pipeline/aux by the MBI L1 pods).
apiVersion: "v1"
kind: "PersistentVolumeClaim"
metadata:
  name: "csst-msc-l1-mbi-aux-pvc"
  namespace: "application"
spec:
  accessModes:
    - "ReadWriteMany"
  resources:
    requests:
      storage: "200Gi"
  storageClassName: "nfs-external-nas"
# NOTE(review): "status" is populated by the API server and ignored on apply;
# it can be dropped from this manifest.
status:
  accessModes:
    - "ReadWriteMany"
  capacity:
    storage: "200Gi"
2. prepare csst-msc-l1-mbi.job.yaml
# Debug variant of the MBI L1 job: "tail -f /etc/hosts" keeps the container
# alive so you can exec in and launch the pipeline manually (step 6 below).
apiVersion: batch/v1
kind: Job
metadata:
  name: csst-msc-l1-mbi
spec:
  template:
    spec:
      securityContext:
        runAsUser: 0
        runAsGroup: 0
      containers:
        - name: csst-msc-l1-mbi
          env:
            - name: CSST_DFS_API_MODE
              value: cluster
            - name: CSST_DFS_GATEWAY
              value: csst-gateway.application:80
            - name: CSST_DFS_APP_ID
              value: test
            - name: CSST_DFS_APP_TOKEN
              value: test
            - name: CCDS_SERVER_URL
              value: ccds-server-nginx.application:9000
            # NOTE(review): the ":ro" suffix looks like volume-mount syntax
            # leaked into these env paths — confirm the consumer expects it.
            - name: CSST_DFS_ROOT
              value: /dfsroot:ro
            - name: CSST_CRDS_ROOT
              value: /crdsroot:ro
            - name: CSST_AUX_DIR
              value: /pipeline/aux:ro
          image: cr.registry.res.cloud.wuxi-yqgcy.cn/mirror/csst-msc-l1-mbi:v240328
          command:
            - tail
          args:
            - -f
            - /etc/hosts
          volumeMounts:
            - mountPath: /pipeline/input
              name: csst-msc-l1-mbi-input
            - mountPath: /pipeline/output
              name: csst-msc-l1-mbi-output
            - mountPath: /pipeline/aux
              name: csst-msc-l1-mbi-aux-pvc
            - mountPath: /dfsroot
              name: csst-data-pvc
            - mountPath: /crdsroot
              name: ccds-data-pvc
      volumes:
        - name: csst-msc-l1-mbi-input
          emptyDir: {}
        - name: csst-msc-l1-mbi-output
          emptyDir: {}
        - name: csst-msc-l1-mbi-aux-pvc
          persistentVolumeClaim:
            claimName: csst-msc-l1-mbi-aux-pvc
        - name: csst-data-pvc
          persistentVolumeClaim:
            claimName: csst-data-pvc
        - name: ccds-data-pvc
          persistentVolumeClaim:
            claimName: ccds-data-pvc
      restartPolicy: OnFailure
2. prepare csst-msc-l1-mbi.deploy.yaml
# Long-running Deployment variant for interactive runs: "tail -f /dev/null"
# keeps the pod alive so you can exec in and run the pipeline by hand.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: csst-msc-l1-mbi
  labels:
    app.kubernetes.io/name: csst-msc-l1-mbi
spec:
  replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/name: csst-msc-l1-mbi
  template:
    metadata:
      labels:
        app.kubernetes.io/name: csst-msc-l1-mbi
    spec:
      containers:
        - name: csst-msc-l1-mbi
          image: cr.registry.res.cloud.wuxi-yqgcy.cn/mirror/csst-msc-l1-mbi:v240328
          imagePullPolicy: IfNotPresent
          env:
            - name: CSST_DFS_API_MODE
              value: "cluster"
            - name: CSST_DFS_GATEWAY
              value: "csst-gateway.application:80"
            - name: CSST_DFS_APP_ID
              value: "test"
            - name: CSST_DFS_APP_TOKEN
              value: "test"
            - name: CCDS_SERVER_URL
              value: ccds-server-nginx.application:9000
            - name: CSST_DFS_ROOT
              value: "/dfsroot"
            - name: CSST_CRDS_ROOT
              value: "/crdsroot"
            - name: CSST_AUX_DIR
              value: "/pipeline/aux"
            - name: CSST_POS0_USER
              value: "-"
            - name: CSST_POS0_IP
              value: "-"
            - name: CSST_CICD_TEST_OUTPUT
              value: "/pipeline/output"
            - name: TZ
              value: Asia/Shanghai
          command:
            - /bin/bash
          args:
            - -c
            - tail -f /dev/null
          volumeMounts:
            - mountPath: /pipeline/input
              name: csst-msc-l1-mbi-input
            - mountPath: /pipeline/output
              name: csst-msc-l1-mbi-output
            - mountPath: /pipeline/aux
              name: csst-msc-l1-mbi-aux-pvc
            - mountPath: /dfsroot
              name: csst-data-pvc
            # fix: was /ccdsroot, which contradicted CSST_CRDS_ROOT=/crdsroot
            # above (and the /crdsroot mount used by csst-msc-l1-mbi.job.yaml).
            - mountPath: /crdsroot
              name: ccds-data-pvc
      volumes:
        - name: csst-msc-l1-mbi-input
          emptyDir: {}
        - name: csst-msc-l1-mbi-output
          emptyDir: {}
        - name: csst-msc-l1-mbi-aux-pvc
          persistentVolumeClaim:
            claimName: csst-msc-l1-mbi-aux-pvc
        - name: csst-data-pvc
          persistentVolumeClaim:
            claimName: csst-data-pvc
        - name: ccds-data-pvc
          persistentVolumeClaim:
            claimName: ccds-data-pvc
2. prepare csst-msc-l1-mbi.final.yaml
# Final one-shot Job: runs the MBI L1 pipeline for obs-id 10160000001 to
# completion; restartPolicy: OnFailure retries the container on error.
apiVersion: batch/v1
kind: Job
metadata:
  name: csst-msc-l1-mbi
spec:
  template:
    spec:
      securityContext:
        runAsUser: 0
        runAsGroup: 0
      containers:
        - name: csst-msc-l1-mbi
          env:
            - name: CSST_DFS_API_MODE
              value: cluster
            - name: CSST_DFS_GATEWAY
              value: csst-gateway.application:80
            - name: CSST_DFS_APP_ID
              value: test
            - name: CSST_DFS_APP_TOKEN
              value: test
            # NOTE(review): the other manifests use a plain host:port here —
            # confirm the https:// scheme is intended.
            - name: CCDS_SERVER_URL
              value: https://ccds-server-nginx.application:9000
            # NOTE(review): ":ro" suffix looks like volume-mount syntax leaked
            # into these env paths — confirm the consumer expects it.
            - name: CSST_DFS_ROOT
              value: /dfsroot:ro
            - name: CSST_CRDS_ROOT
              value: /ccdsroot:ro
            - name: CSST_AUX_DIR
              value: /pipeline/aux:ro
          image: cr.registry.res.cloud.wuxi-yqgcy.cn/mirror/csst-msc-l1-mbi:v240328
          command: ["python", "/pipeline/src/run.py", "--obs-id=10160000001", "--device=cpu", "--n-jobs=18", "--n-jobs-gpu=9", "--clean-l0", "--clean-l1"]
          volumeMounts:
            - mountPath: /pipeline/input
              name: csst-msc-l1-mbi-input
            - mountPath: /pipeline/output
              name: csst-msc-l1-mbi-output
            - mountPath: /pipeline/aux
              name: csst-msc-l1-mbi-aux-pvc
            - mountPath: /dfsroot
              name: csst-data-pvc
            - mountPath: /ccdsroot
              name: ccds-data-pvc
      volumes:
        - name: csst-msc-l1-mbi-input
          emptyDir: {}
        - name: csst-msc-l1-mbi-output
          emptyDir: {}
        - name: csst-msc-l1-mbi-aux-pvc
          persistentVolumeClaim:
            claimName: csst-msc-l1-mbi-aux-pvc
        - name: csst-data-pvc
          persistentVolumeClaim:
            claimName: csst-data-pvc
        - name: ccds-data-pvc
          persistentVolumeClaim:
            claimName: ccds-data-pvc
      restartPolicy: OnFailure
3. [Optional] create pvc resource
kubectl -n application apply -f csst-msc-l1-mbi-aux-pvc.yaml
4.1 [Optional] delete on k8s
kubectl -n application delete -f csst-msc-l1-mbi.job.yaml
4.2 [Optional] apply on k8s
kubectl -n application apply -f csst-msc-l1-mbi.job.yaml
4.1 [Optional] delete on k8s
kubectl -n application delete -f csst-msc-l1-mbi.deploy.yaml
4.2 [Optional] apply on k8s
kubectl -n application apply -f csst-msc-l1-mbi.deploy.yaml
4.1 [Optional] delete on k8s
kubectl -n application delete -f csst-msc-l1-mbi.final.yaml
4.2 [Optional] apply on k8s
kubectl -n application apply -f csst-msc-l1-mbi.final.yaml
5. exec into pod
kubectl -n application exec -it <$pod_id> -- bash
6. run command in the pod
python /pipeline/src/run.py --obs-id=10160000001 --device=cpu --n-jobs=18 --n-jobs-gpu=9 --clean-l0 --clean-l1
