Import DBF files into current_states
All checks were successful
continuous-integration/drone/push Build is passing

mingsheng.li 2025-07-28 20:49:44 +08:00
parent 496276d513
commit 73d163fe0f
7 changed files with 107 additions and 4 deletions


@@ -8,6 +8,15 @@ spec:
  backoffLimit: 0
  template:
    spec:
+     affinity:
+       nodeAffinity:
+         requiredDuringSchedulingIgnoredDuringExecution:
+           nodeSelectorTerms:
+             - matchExpressions:
+                 - key: {{JOB_HOST_KEY}}
+                   operator: In
+                   values:
+                     - {{JOB_HOST_NAME}}
      containers:
        - name: importer
          image: {{IMAGE_REPO}}/databridge:{{IMAGE_TAG}}
@@ -16,9 +25,9 @@ spec:
            - name: DATA_PVC_MOUNT_PATH
              value: "/data"
            - name: DBF_INPUT_DIR
-             value: "/data/data-import-export/dbf-input"
+             value: "/data/dbf-input"
            - name: MAPPING_FILE
-             value: "/data/data-import-export/disney-mapping.xlsx"
+             value: "/data/disney-mapping.xlsx"
            - name: DB_HOST
              value: "{{DB_HOST}}"
            - name: DB_PORT
@@ -39,10 +48,10 @@ spec:
          resources:
            requests:
              cpu: "500m"
-             memory: "1Gi"
+             memory: "800Mi"
            limits:
              cpu: "1000m"
-             memory: "2Gi"
+             memory: "1700Mi"
      volumes:
        - name: data-volume
          persistentVolumeClaim:
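The new required nodeAffinity pins the importer pod to the node whose {{JOB_HOST_KEY}} label matches {{JOB_HOST_NAME}}, which is needed because the data lives on a local volume on that host. A quick check that the target node actually carries the expected label (using the script defaults kubernetes.io/hostname and idrc-disney-1; adjust if overridden):

kubectl get node idrc-disney-1 -L kubernetes.io/hostname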

k8s/pv-disney.yaml (new file, +22 lines)

@@ -0,0 +1,22 @@
apiVersion: v1
kind: PersistentVolume
metadata:
  name: essd-data-import-export-pv        # arbitrary, but must be unique
spec:
  capacity:
    storage: 40Gi                         # just needs to match the dataset quota
  accessModes:
    - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain   # keep the data when the PV is deleted
  storageClassName: ""                    # left empty so no dynamic provisioner claims it
  volumeMode: Filesystem
  local:
    path: /disney-data
  nodeAffinity:
    required:
      nodeSelectorTerms:
        - matchExpressions:
            - key: kubernetes.io/hostname
              operator: In
              values:
                - idrc-disney-1
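Note that Kubernetes does not create the backing directory for a local PersistentVolume; /disney-data has to exist on idrc-disney-1 before the volume can be mounted. A minimal preparation step, run directly on that node:

sudo mkdir -p /disney-data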

k8s/pvc-disney.yaml (new file, +13 lines)

@@ -0,0 +1,13 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: data-import-export-pvc
  namespace: default                      # same namespace as the Job
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 40Gi
  storageClassName: ""                    # must match the PV
  volumeName: essd-data-import-export-pv  # explicitly bind to the PV created above
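Because storageClassName is empty and volumeName is set, the claim binds statically to essd-data-import-export-pv rather than waiting for a dynamic provisioner. A quick way to apply both manifests and verify the binding (illustrative commands):

kubectl apply -f k8s/pv-disney.yaml -f k8s/pvc-disney.yaml
kubectl get pvc data-import-export-pvc -n default    # STATUS should show Bound, VOLUME essd-data-import-export-pv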


@@ -0,0 +1,55 @@
#!/bin/bash
set -e
# Default configuration
JOB_ID=$(date +%Y%m%d-%H%M%S)
IMAGE_REPO=${IMAGE_REPO:-"harbor.dc.teramesh.cn/library/tools"}
IMAGE_TAG=${IMAGE_TAG:-"dev"}
BATCH_SIZE=${BATCH_SIZE:-"50"}
LOG_LEVEL=${LOG_LEVEL:-"INFO"}
DATA_PVC_NAME=${DATA_PVC_NAME:-"data-import-export-pvc"}
JOB_HOST_KEY=${JOB_HOST_KEY:-"kubernetes.io/hostname"}
JOB_HOST_NAME=${JOB_HOST_NAME:-"idrc-disney-1"}
# Database configuration (must be changed for actual use)
DB_HOST=${DB_HOST:-"db"}
DB_PORT=${DB_PORT:-"6432"}
DB_NAME=${DB_NAME:-"idrc"}
DB_USER=${DB_USER:-"teramesh"}
DB_PASSWORD=${DB_PASSWORD:-"2iqTCHwnf75stGBzM8le"}
NAMESPACE=${NAMESPACE:-"default"}
# Check that the template file exists
TEMPLATE_FILE="dbf-import-job.yaml"
if [ ! -f "$TEMPLATE_FILE" ]; then
echo "Template file not found: $TEMPLATE_FILE"
exit 1
fi
# Substitute the template variables directly (without envsubst)
OUTPUT_FILE="dbf-import-job-${JOB_ID}.yaml"
sed -e "s|{{JOB_ID}}|$JOB_ID|g" \
-e "s|{{NAMESPACE}}|$NAMESPACE|g" \
-e "s|{{IMAGE_REPO}}|$IMAGE_REPO|g" \
-e "s|{{IMAGE_TAG}}|$IMAGE_TAG|g" \
-e "s|{{DATA_PVC_NAME}}|$DATA_PVC_NAME|g" \
-e "s|{{JOB_HOST_KEY}}|$JOB_HOST_KEY|g" \
-e "s|{{JOB_HOST_NAME}}|$JOB_HOST_NAME|g" \
-e "s|{{DB_HOST}}|$DB_HOST|g" \
-e "s|{{DB_PORT}}|$DB_PORT|g" \
-e "s|{{DB_NAME}}|$DB_NAME|g" \
-e "s|{{DB_USER}}|$DB_USER|g" \
-e "s|{{DB_PASSWORD}}|$DB_PASSWORD|g" \
-e "s|{{BATCH_SIZE}}|$BATCH_SIZE|g" \
-e "s|{{LOG_LEVEL}}|$LOG_LEVEL|g" \
"$TEMPLATE_FILE" > "$OUTPUT_FILE"
# Validate before deployment
echo "Validating generated YAML..."
kubectl apply -f "$OUTPUT_FILE" -n "$NAMESPACE" --dry-run=client
# Deploy the Job
kubectl apply -f "$OUTPUT_FILE" -n "$NAMESPACE"
echo "Job deployed in namespace $NAMESPACE: dbf-import-job-${JOB_ID}"
echo "To view logs: kubectl logs job/dbf-import-job-${JOB_ID} -n $NAMESPACE"


@@ -8,6 +8,8 @@ IMAGE_TAG=${IMAGE_TAG:-"dev"}
BATCH_SIZE=${BATCH_SIZE:-"1000"}
LOG_LEVEL=${LOG_LEVEL:-"INFO"}
DATA_PVC_NAME=${DATA_PVC_NAME:-"data-import-export-pvc"}
+ JOB_HOST_KEY=${JOB_HOST_KEY:-"kubernetes.io/hostname"}
+ JOB_HOST_NAME=${JOB_HOST_NAME:-"idrc-disney-1"}
# Database configuration (must be changed for actual use)
DB_HOST=${DB_HOST:-"test-db.db.svc.cluster.local"}
DB_PORT=${DB_PORT:-"6432"}
@@ -31,6 +33,8 @@ sed -e "s|{{JOB_ID}}|$JOB_ID|g" \
-e "s|{{IMAGE_REPO}}|$IMAGE_REPO|g" \
-e "s|{{IMAGE_TAG}}|$IMAGE_TAG|g" \
-e "s|{{DATA_PVC_NAME}}|$DATA_PVC_NAME|g" \
-e "s|{{JOB_HOST_KEY}}|$JOB_HOST_KEY|g" \
-e "s|{{JOB_HOST_NAME}}|$JOB_HOST_NAME|g" \
-e "s|{{DB_HOST}}|$DB_HOST|g" \
-e "s|{{DB_PORT}}|$DB_PORT|g" \
-e "s|{{DB_NAME}}|$DB_NAME|g" \