diff --git a/k8s/job-templates/dbf-import-job.yaml b/k8s/job-templates/dbf-import-job.yaml index b05aef3..069972c 100644 --- a/k8s/job-templates/dbf-import-job.yaml +++ b/k8s/job-templates/dbf-import-job.yaml @@ -8,6 +8,15 @@ spec: backoffLimit: 0 template: spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: {{JOB_HOST_KEY}} + operator: In + values: + - {{JOB_HOST_NAME}} containers: - name: importer image: {{IMAGE_REPO}}/databridge:{{IMAGE_TAG}} @@ -16,9 +25,9 @@ spec: - name: DATA_PVC_MOUNT_PATH value: "/data" - name: DBF_INPUT_DIR - value: "/data/data-import-export/dbf-input" + value: "/data/dbf-input" - name: MAPPING_FILE - value: "/data/data-import-export/disney-mapping.xlsx" + value: "/data/disney-mapping.xlsx" - name: DB_HOST value: "{{DB_HOST}}" - name: DB_PORT @@ -39,10 +48,10 @@ spec: resources: requests: cpu: "500m" - memory: "1Gi" + memory: "800Mi" limits: cpu: "1000m" - memory: "2Gi" + memory: "1700Mi" volumes: - name: data-volume persistentVolumeClaim: diff --git a/k8s/pv-disney.yaml b/k8s/pv-disney.yaml new file mode 100644 index 0000000..079c002 --- /dev/null +++ b/k8s/pv-disney.yaml @@ -0,0 +1,22 @@ +apiVersion: v1 +kind: PersistentVolume +metadata: + name: essd-data-import-export-pv # 任意,但要保证唯一 +spec: + capacity: + storage: 40Gi # 与数据集配额一致即可 + accessModes: + - ReadWriteOnce + persistentVolumeReclaimPolicy: Retain # 删除 PV 时保留数据 + storageClassName: "" # 留空,防止动态 Provisioner 抢占 + volumeMode: Filesystem + local: + path: /disney-data + nodeAffinity: + required: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/hostname + operator: In + values: + - idrc-disney-1 diff --git a/k8s/pv.yaml b/k8s/pv-test.yaml similarity index 100% rename from k8s/pv.yaml rename to k8s/pv-test.yaml diff --git a/k8s/pvc-disney.yaml b/k8s/pvc-disney.yaml new file mode 100644 index 0000000..e6a7cda --- /dev/null +++ b/k8s/pvc-disney.yaml @@ -0,0 +1,13 @@ +apiVersion: v1 +kind: 
PersistentVolumeClaim +metadata: + name: data-import-export-pvc + namespace: default # 与 Job 同命名空间 +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 40Gi + storageClassName: "" # 必须与 PV 一致 + volumeName: essd-data-import-export-pv # ← 显式绑定到刚才创建的 PV \ No newline at end of file diff --git a/k8s/pvc.yaml b/k8s/pvc-test.yaml similarity index 100% rename from k8s/pvc.yaml rename to k8s/pvc-test.yaml diff --git a/scripts/deploy-dbf-import-disney.sh b/scripts/deploy-dbf-import-disney.sh new file mode 100644 index 0000000..520d1e1 --- /dev/null +++ b/scripts/deploy-dbf-import-disney.sh @@ -0,0 +1,55 @@ +#!/bin/bash +set -e + +# 默认配置 +JOB_ID=$(date +%Y%m%d-%H%M%S) +IMAGE_REPO=${IMAGE_REPO:-"harbor.dc.teramesh.cn/library/tools"} +IMAGE_TAG=${IMAGE_TAG:-"dev"} +BATCH_SIZE=${BATCH_SIZE:-"50"} +LOG_LEVEL=${LOG_LEVEL:-"INFO"} +DATA_PVC_NAME=${DATA_PVC_NAME:-"data-import-export-pvc"} +JOB_HOST_KEY=${JOB_HOST_KEY:-"kubernetes.io/hostname"} +JOB_HOST_NAME=${JOB_HOST_NAME:-"idrc-disney-1"} +# 数据库配置(使用时需要修改) +DB_HOST=${DB_HOST:-"db"} +DB_PORT=${DB_PORT:-"6432"} +DB_NAME=${DB_NAME:-"idrc"} +DB_USER=${DB_USER:-"teramesh"} +DB_PASSWORD=${DB_PASSWORD:?"DB_PASSWORD must be set in the environment; do not commit credentials"} + +NAMESPACE=${NAMESPACE:-"default"} + +# 检查模板文件 +TEMPLATE_FILE="dbf-import-job.yaml" +if [ ! 
-f "$TEMPLATE_FILE" ]; then + echo "Template file not found: $TEMPLATE_FILE" + exit 1 +fi + +# 直接替换模板变量(不使用envsubst) +OUTPUT_FILE="dbf-import-job-${JOB_ID}.yaml" +sed -e "s|{{JOB_ID}}|$JOB_ID|g" \ + -e "s|{{NAMESPACE}}|$NAMESPACE|g" \ + -e "s|{{IMAGE_REPO}}|$IMAGE_REPO|g" \ + -e "s|{{IMAGE_TAG}}|$IMAGE_TAG|g" \ + -e "s|{{DATA_PVC_NAME}}|$DATA_PVC_NAME|g" \ + -e "s|{{JOB_HOST_KEY}}|$JOB_HOST_KEY|g" \ + -e "s|{{JOB_HOST_NAME}}|$JOB_HOST_NAME|g" \ + -e "s|{{DB_HOST}}|$DB_HOST|g" \ + -e "s|{{DB_PORT}}|$DB_PORT|g" \ + -e "s|{{DB_NAME}}|$DB_NAME|g" \ + -e "s|{{DB_USER}}|$DB_USER|g" \ + -e "s|{{DB_PASSWORD}}|$DB_PASSWORD|g" \ + -e "s|{{BATCH_SIZE}}|$BATCH_SIZE|g" \ + -e "s|{{LOG_LEVEL}}|$LOG_LEVEL|g" \ + "$TEMPLATE_FILE" > "$OUTPUT_FILE" + +# 部署前验证 +echo "Validating generated YAML..." +kubectl apply -f "$OUTPUT_FILE" -n "$NAMESPACE" --dry-run=client + +# 部署Job +kubectl apply -f "$OUTPUT_FILE" -n "$NAMESPACE" + +echo "Job deployed in namespace $NAMESPACE: dbf-import-job-${JOB_ID}" +echo "To view logs: kubectl logs job/dbf-import-job-${JOB_ID} -n $NAMESPACE" \ No newline at end of file diff --git a/scripts/deploy-dbf-import.sh b/scripts/deploy-dbf-import-test.sh similarity index 89% rename from scripts/deploy-dbf-import.sh rename to scripts/deploy-dbf-import-test.sh index 77316eb..e8081e2 100644 --- a/scripts/deploy-dbf-import.sh +++ b/scripts/deploy-dbf-import-test.sh @@ -8,6 +8,8 @@ IMAGE_TAG=${IMAGE_TAG:-"dev"} BATCH_SIZE=${BATCH_SIZE:-"1000"} LOG_LEVEL=${LOG_LEVEL:-"INFO"} DATA_PVC_NAME=${DATA_PVC_NAME:-"data-import-export-pvc"} +JOB_HOST_KEY=${JOB_HOST_KEY:-"kubernetes.io/hostname"} +JOB_HOST_NAME=${JOB_HOST_NAME:-"idrc-disney-1"} # 数据库配置(使用时需要修改) DB_HOST=${DB_HOST:-"test-db.db.svc.cluster.local"} DB_PORT=${DB_PORT:-"6432"} @@ -31,6 +33,8 @@ sed -e "s|{{JOB_ID}}|$JOB_ID|g" \ -e "s|{{IMAGE_REPO}}|$IMAGE_REPO|g" \ -e "s|{{IMAGE_TAG}}|$IMAGE_TAG|g" \ -e "s|{{DATA_PVC_NAME}}|$DATA_PVC_NAME|g" \ + -e "s|{{JOB_HOST_KEY}}|$JOB_HOST_KEY|g" \ + -e 
"s|{{JOB_HOST_NAME}}|$JOB_HOST_NAME|g" \ -e "s|{{DB_HOST}}|$DB_HOST|g" \ -e "s|{{DB_PORT}}|$DB_PORT|g" \ -e "s|{{DB_NAME}}|$DB_NAME|g" \