chore: sync local latest state and repository cleanup

This commit is contained in:
Your Name
2026-03-23 13:02:36 +08:00
parent f1ff3d629f
commit 2ef0f17961
493 changed files with 46912 additions and 7977 deletions

113
scripts/ci/archive-logs.sh Executable file
View File

@@ -0,0 +1,113 @@
#!/usr/bin/env bash
# Archive aged log artifacts from logs/ into logs/archive/<tag>/.
# Dry-run by default; pass --apply to actually move files.
set -euo pipefail
# Repository root (two levels up from scripts/ci/).
ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)"
# "false" => dry-run; --apply flips this to "true".
APPLY="false"
# Only files whose mtime is older than this many days are archived.
OLDER_THAN_DAYS="1"
# Default archive batch tag is the current timestamp; overridable via --archive-tag.
ARCHIVE_TAG_DEFAULT="$(date +%Y%m%d_%H%M%S)"
ARCHIVE_TAG="${ARCHIVE_TAG_DEFAULT}"
# Repo-relative glob patterns of log artifacts eligible for archiving.
# NOTE: keep in sync with scripts/ci/logs-health-check.sh PATTERN_PATHS.
PATTERN_PATHS=(
"logs/e2e-automation/run_*.log"
"logs/e2e-automation/report_*.md"
"logs/prd-review/review_*.md"
"logs/prd-review/claude_apply_*.md"
"logs/prd-review/execution_report_*.md"
"logs/prd-review/optimization_report_*.md"
)
# Print CLI usage to stdout (heredoc body is user-facing text; left verbatim).
usage() {
cat <<'EOF'
Usage:
  ./scripts/ci/archive-logs.sh [--apply] [--older-than-days N] [--archive-tag TAG]
Options:
  --apply Execute archive move. Without this flag, script runs in dry-run mode.
  --older-than-days N Archive files older than N days. Default: 1
  --archive-tag TAG Archive subdir tag under logs/archive/. Default: timestamp
  -h, --help Show help
Examples:
  ./scripts/ci/archive-logs.sh
  ./scripts/ci/archive-logs.sh --apply
  ./scripts/ci/archive-logs.sh --apply --older-than-days 2 --archive-tag weekly_20260323
EOF
}
# Print a tagged status line for this script.
log() {
  echo "[archive-logs] $*"
}

# Execute the given command when --apply was passed; otherwise only log it.
# Globals: APPLY (read)
# Returns: the command's exit status in apply mode, 0 in dry-run mode.
run_cmd() {
  if [[ "${APPLY}" != "true" ]]; then
    log "DRY-RUN: $*"
    return
  fi
  "$@"
}
# ---- argument parsing ----
while [[ $# -gt 0 ]]; do
case "$1" in
--apply)
APPLY="true"
shift
;;
--older-than-days)
OLDER_THAN_DAYS="${2:-}"
shift 2
;;
--archive-tag)
ARCHIVE_TAG="${2:-}"
shift 2
;;
-h|--help)
usage
exit 0
;;
*)
echo "Unknown option: $1" >&2
usage
exit 1
;;
esac
done
# Validate: --older-than-days must be a non-negative integer.
if ! [[ "${OLDER_THAN_DAYS}" =~ ^[0-9]+$ ]]; then
echo "Invalid --older-than-days: ${OLDER_THAN_DAYS}" >&2
exit 1
fi
ARCHIVE_DIR="${ROOT_DIR}/logs/archive/${ARCHIVE_TAG}"
# Files modified strictly before this epoch are archive candidates (GNU date).
CUTOFF_EPOCH="$(date -d "${OLDER_THAN_DAYS} days ago" +%s)"
log "root=${ROOT_DIR}"
log "apply=${APPLY} older_than_days=${OLDER_THAN_DAYS} archive_dir=${ARCHIVE_DIR}"
log "cutoff=$(date -d "@${CUTOFF_EPOCH}" '+%Y-%m-%d %H:%M:%S')"
# Create the batch directory only when actually applying changes.
if [[ "${APPLY}" == "true" ]]; then
mkdir -p "${ARCHIVE_DIR}"
fi
# Walk every archive pattern and move stale matches into the batch directory,
# preserving the repo-relative path. nullglob: unmatched patterns expand to
# nothing instead of themselves.
shopt -s nullglob
moved_count=0
for pattern in "${PATTERN_PATHS[@]}"; do
  # Intentionally unquoted so the glob pattern expands.
  for abs in "${ROOT_DIR}"/${pattern}; do
    [[ -f "${abs}" ]] || continue
    mtime_epoch="$(stat -c %Y "${abs}")"
    # Keep files modified at or after the cutoff.
    if [[ "${mtime_epoch}" -ge "${CUTOFF_EPOCH}" ]]; then
      continue
    fi
    # Quote the prefix so glob metacharacters in ROOT_DIR are not treated as
    # a pattern during prefix removal (ShellCheck SC2295).
    rel="${abs#"${ROOT_DIR}"/}"
    dest="${ARCHIVE_DIR}/${rel}"
    run_cmd mkdir -p "$(dirname "${dest}")"
    run_cmd mv "${abs}" "${dest}"
    log "ARCHIVE ${rel} -> ${dest}"
    moved_count=$((moved_count + 1))
  done
done
shopt -u nullglob
log "done: archived=${moved_count}"
if [[ "${APPLY}" != "true" ]]; then
  log "dry-run completed. Use --apply to execute."
fi

View File

@@ -0,0 +1,95 @@
#!/usr/bin/env bash
# Assert that the migration tests were not skipped.
# In CI, the critical test classes listed below must report Skipped == 0;
# otherwise the migration verification did not run and the quality gate fails.
set -euo pipefail
ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)"
# Maven surefire writes per-class reports here after a test run.
REPORT_DIR="${ROOT_DIR}/target/surefire-reports"
# Critical test classes that must never be skipped in strict mode.
CRITICAL_TESTS=(
"com.mosquito.project.permission.PermissionCanonicalMigrationTest"
"com.mosquito.project.integration.AuditLogImmutabilityIntegrationTest"
"com.mosquito.project.RolePermissionMigrationTest"
"com.mosquito.project.FlywayMigrationSmokeTest"
)
echo "===== 关键测试跳过断言 ====="
# Bail out early if the surefire report directory does not exist.
if [[ ! -d "${REPORT_DIR}" ]]; then
echo "ERROR: 测试报告目录不存在: ${REPORT_DIR}"
echo "请先运行 Maven 测试"
exit 1
fi
FAILED=0
for TEST_CLASS in "${CRITICAL_TESTS[@]}"; do
  echo ""
  echo "检查: ${TEST_CLASS}"
  # Locate the surefire report for this class (prefer .txt, then .xml).
  REPORT_FILE=""
  for ext in txt xml; do
    candidate="${REPORT_DIR}/${TEST_CLASS}.${ext}"
    if [[ -f "${candidate}" ]]; then
      REPORT_FILE="${candidate}"
      break
    fi
  done
  if [[ -z "${REPORT_FILE}" ]]; then
    echo " ERROR: 未找到测试报告文件: ${TEST_CLASS}.{txt,xml}"
    echo " 目录内容:"
    ls -la "${REPORT_DIR}" | head -20 || true
    FAILED=1
    continue
  fi
  echo " 报告: ${REPORT_FILE}"
  if [[ "${REPORT_FILE}" == *.xml ]]; then
    # Parse the skipped="N" attribute when the testsuite element carries it.
    if grep -q 'failures="[^"]*" errors="[^"]*" skipped="[^"]*"' "${REPORT_FILE}"; then
      SKIPPED=$(grep -oP 'skipped="\K[0-9]+' "${REPORT_FILE}" | head -1)
      if [[ "${SKIPPED}" -gt 0 ]]; then
        echo " ERROR: ${TEST_CLASS}有 ${SKIPPED} 个被跳过!"
        echo " 在CI严格模式下关键测试必须执行。"
        echo " 质量门禁失败Skipped 数量必须为0"
        FAILED=1
      else
        echo " PASS: ${TEST_CLASS} 跳过数量为0${SKIPPED}"
      fi
    else
      # No skipped attribute in the XML: at least ensure the class ran at all.
      if grep -q 'tests="0"' "${REPORT_FILE}"; then
        echo " ERROR: ${TEST_CLASS} 未被执行tests=\"0\""
        FAILED=1
      else
        echo " INFO: 无法从XML中解析skipped数量假设通过"
      fi
    fi
  elif [[ "${REPORT_FILE}" == *.txt ]]; then
    # Text report: surefire summary lines look like
    #   "Tests run: 4, Failures: 0, Errors: 0, Skipped: 0, Time elapsed: 1.9 s"
    # The previous pattern 'Skipped.*[1-9]' also matched digits of the
    # "Time elapsed" field on the same line, producing false failures.
    # Anchor the match to the Skipped counter itself, and use -q to keep
    # the console output clean.
    if grep -q "Skipped" "${REPORT_FILE}"; then
      if grep -Eq 'Skipped: *[1-9]' "${REPORT_FILE}"; then
        echo " ERROR: ${TEST_CLASS} 有跳过的用例!"
        FAILED=1
      fi
    fi
    echo " INFO: 文本报告格式,跳过详细检查"
  fi
done
echo ""
if [[ "${FAILED}" -eq 1 ]]; then
  echo "===== 关键测试跳过断言失败 ====="
  exit 1
fi
echo "===== 关键测试跳过断言通过 ====="
exit 0

62
scripts/ci/backend-verify.sh Executable file
View File

@@ -0,0 +1,62 @@
#!/usr/bin/env bash
# Strict-mode backend verification: requires a rootless podman API service so
# Testcontainers-based migration tests can run.
set -euo pipefail
# Repository root (two levels up from scripts/ci/).
ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)"
# Scratch directories kept inside the repo (JNA/JVM temp dirs and podman log).
TMP_DIR="${ROOT_DIR}/tmp"
JNA_TMP_DIR="${TMP_DIR}/jna"
JAVA_TMP_DIR="${TMP_DIR}/java"
PODMAN_LOG="${TMP_DIR}/podman-service.log"
# Rootless podman API socket for the current user.
PODMAN_SOCK_PATH="/run/user/$(id -u)/podman/podman.sock"
PODMAN_SOCK="unix://${PODMAN_SOCK_PATH}"
PODMAN_PID=""
# Fail fast when podman is missing — strict-mode verification cannot run.
if ! command -v podman >/dev/null 2>&1; then
echo "ERROR: podman 未安装,无法执行严格模式迁移验证。" >&2
exit 1
fi
mkdir -p "${JNA_TMP_DIR}" "${JAVA_TMP_DIR}"
# Stop the background podman API service on any exit path.
cleanup() {
if [[ -n "${PODMAN_PID}" ]] && kill -0 "${PODMAN_PID}" >/dev/null 2>&1; then
kill "${PODMAN_PID}" >/dev/null 2>&1 || true
wait "${PODMAN_PID}" >/dev/null 2>&1 || true
fi
}
trap cleanup EXIT
cd "${ROOT_DIR}"
# Start a long-lived podman API service in the background (--time=0 = no idle timeout).
podman system service --time=0 "${PODMAN_SOCK}" > "${PODMAN_LOG}" 2>&1 &
PODMAN_PID=$!
# Wait up to ~30s for the API socket to accept requests.
for _ in {1..30}; do
if [[ -S "${PODMAN_SOCK_PATH}" ]] && podman --url "${PODMAN_SOCK}" info >/dev/null 2>&1; then
break
fi
sleep 1
done
# Re-check after the wait loop; dump the service log tail on failure.
if ! [[ -S "${PODMAN_SOCK_PATH}" ]] || ! podman --url "${PODMAN_SOCK}" info >/dev/null 2>&1; then
echo "ERROR: podman service 未就绪,无法执行严格模式迁移验证。" >&2
if [[ -f "${PODMAN_LOG}" ]]; then
echo "----- podman service log (tail) -----" >&2
tail -n 80 "${PODMAN_LOG}" >&2 || true
fi
exit 1
fi
# Point Testcontainers at the podman socket.
# NOTE(review): Ryuk is disabled — presumably because the reaper container is
# unreliable under rootless podman; confirm containers are cleaned up elsewhere.
export DOCKER_HOST="${PODMAN_SOCK}"
export TESTCONTAINERS_RYUK_DISABLED="true"
mvn -B -DskipTests=false -Dmigration.test.strict=true \
-Djna.tmpdir="${JNA_TMP_DIR}" \
-Djava.io.tmpdir="${JAVA_TMP_DIR}" \
clean verify
# Explicitly run critical integration tests (previously excluded by default).
echo "=== 执行关键集成测试集合 ==="
mvn -B test -Dtest=UserOperationJourneyTest,CacheConfigIntegrationTest,SchemaVerificationTest \
-DfailIfNoTests=false \
-Djourney.test.enabled=true \
-Djna.tmpdir="${JNA_TMP_DIR}" \
-Djava.io.tmpdir="${JAVA_TMP_DIR}"

View File

@@ -0,0 +1,119 @@
#!/usr/bin/env bash
# Container runtime detection script.
# Run before strict-mode tests to check that Docker/Podman is usable; prints
# concrete remediation instructions when neither runtime is available.
set -euo pipefail
ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)"
# Positional flags: $1 => check Docker, $2 => check Podman ("true"/"false").
CHECK_DOCKER="${1:-false}"
CHECK_PODMAN="${2:-false}"
echo "===== 容器运行时检测 ====="
echo ""
# When neither runtime was requested explicitly, check both.
if [[ "${CHECK_DOCKER}" == "false" && "${CHECK_PODMAN}" == "false" ]]; then
CHECK_DOCKER="true"
CHECK_PODMAN="true"
fi
DOCKER_AVAILABLE="false"
PODMAN_AVAILABLE="false"
# ---- Docker detection: installed AND daemon reachable ----
if [[ "${CHECK_DOCKER}" == "true" ]]; then
echo "检测 Docker..."
if command -v docker >/dev/null 2>&1; then
if docker info >/dev/null 2>&1; then
echo " ✓ Docker 已安装且运行中"
DOCKER_AVAILABLE="true"
else
echo " ✗ Docker 已安装但无法连接可能需要启动Docker daemon"
echo " 解决方案: sudo systemctl start docker"
fi
else
echo " ✗ Docker 未安装"
fi
fi
# ---- Podman detection: installed AND API reachable ----
if [[ "${CHECK_PODMAN}" == "true" ]]; then
echo ""
echo "检测 Podman..."
if command -v podman >/dev/null 2>&1; then
if podman info >/dev/null 2>&1; then
echo " ✓ Podman 已安装且运行中"
PODMAN_AVAILABLE="true"
else
echo " ✗ Podman 已安装但无法连接"
echo " 解决方案: podman system start"
fi
else
echo " ✗ Podman 未安装"
fi
fi
# ---- Docker socket check (informational only; does not set availability) ----
echo ""
echo "检测 Docker Socket..."
DOCKER_SOCK="/var/run/docker.sock"
if [[ -S "${DOCKER_SOCK}" ]]; then
echo "${DOCKER_SOCK} 存在"
else
echo "${DOCKER_SOCK} 不存在或不是socket"
fi
# ---- Podman socket check ----
echo ""
echo "检测 Podman Socket..."
# Probe the current user's socket first, then common fallbacks (root, uid 1000).
# NOTE(review): a socket owned by another uid may not be usable by this
# process even though it exists — confirm this fallback is intended.
for uid in $(id -u) 0 1000; do
PODMAN_SOCK="/run/user/${uid}/podman/podman.sock"
if [[ -S "${PODMAN_SOCK}" ]]; then
echo "${PODMAN_SOCK} 存在"
PODMAN_AVAILABLE="true"
break
fi
done
if [[ "${PODMAN_AVAILABLE}" != "true" ]]; then
echo " ✗ 未找到可用的Podman socket"
fi
# ---- final verdict ----
echo ""
echo "===== 检测结果 ====="
if [[ "${DOCKER_AVAILABLE}" == "true" || "${PODMAN_AVAILABLE}" == "true" ]]; then
echo "✓ 容器运行时可用"
if [[ "${DOCKER_AVAILABLE}" == "true" ]]; then
echo " - Docker: 可用"
# NOTE(review): export only affects this process and its children; it
# reaches the caller's shell only if this script is sourced — confirm intent.
export DOCKER_HOST="unix://${DOCKER_SOCK}"
fi
if [[ "${PODMAN_AVAILABLE}" == "true" ]]; then
echo " - Podman: 可用"
# Podman wins when both runtimes are available (assigned last).
export DOCKER_HOST="unix:///run/user/$(id -u)/podman/podman.sock"
fi
echo ""
echo "可以执行 strict 模式测试"
exit 0
else
echo "✗ 无可用的容器运行时"
echo ""
echo "===== 修复指令 ====="
echo "严格模式测试需要Docker或Podman来启动PostgreSQL容器。"
echo ""
echo "方案1: 使用Docker"
echo " 1. 安装Docker: sudo apt install docker.io"
echo " 2. 启动Docker daemon: sudo systemctl start docker"
echo " 3. 将当前用户加入docker组: sudo usermod -aG docker \$(whoami)"
echo " 4. 重新登录或执行: newgrp docker"
echo ""
echo "方案2: 使用Podman推荐无需root"
echo " 1. 安装Podman: sudo apt install podman"
echo " 2. 启动Podman服务: podman system start"
echo ""
echo "方案3: 跳过strict模式测试"
echo " 使用普通Maven测试命令不带 -Dmigration.test.strict=true"
echo " 注意:这将导致关键安全测试被跳过,不推荐用于生产环境"
echo ""
exit 1
fi

237
scripts/ci/clean-artifacts.sh Executable file
View File

@@ -0,0 +1,237 @@
#!/usr/bin/env bash
# Archive or delete transient test artifacts and build outputs.
# Dry-run by default; pass --apply to actually archive/delete.
set -euo pipefail
# Repository root (two levels up from scripts/ci/).
ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)"
ARCHIVE_BASE_DEFAULT="/tmp/mosquito-archives"
ARCHIVE_TAG_DEFAULT="$(date +%Y%m%d_%H%M%S)"
MODE="archive" # archive | delete
APPLY="false" # false => dry-run
INCLUDE_TRACKED="false" # true => include git tracked paths
FAIL_ON_FOUND="false" # true => exit non-zero when cleanup candidates exist
INCLUDE_BUILD_OUTPUTS="false" # true => include dist/target build outputs
ARCHIVE_BASE="${ARCHIVE_BASE_DEFAULT}"
ARCHIVE_TAG="${ARCHIVE_TAG_DEFAULT}"
# Repo-relative test-artifact directories that are always cleanup candidates.
ARTIFACT_PATHS=(
"frontend/admin/test-results"
"frontend/e2e/e2e-results"
"frontend/e2e/e2e-report"
"frontend/e2e/playwright-report"
"frontend/e2e/test-results"
"frontend/e2e-admin/test-results"
"frontend/e2e-results"
)
# Build outputs, only cleaned with --include-build-outputs.
BUILD_OUTPUT_PATHS=(
"target"
"frontend/dist"
"frontend/admin/dist"
"frontend/h5/dist"
)
# Report files that occasionally spill into the repository root.
ROOT_SPILLOVER_GLOBS=(
"e2e-test-report-*.md"
"E2E_TEST*.md"
"TEST_E2E_*.md"
"COVERAGE_*.md"
)
# Print CLI usage to stdout (heredoc body is user-facing text; left verbatim).
usage() {
cat <<'EOF'
Usage:
  ./scripts/ci/clean-artifacts.sh [--apply] [--mode archive|delete] [--archive-base DIR] [--archive-tag TAG] [--include-tracked] [--include-build-outputs] [--fail-on-found]
Options:
  --apply Execute cleanup. Without this flag, script runs in dry-run mode.
  --mode MODE archive (default) | delete
  --archive-base DIR Archive base dir for archive mode. Default: /tmp/mosquito-archives
  --archive-tag TAG Archive subdir tag. Default: current timestamp
  --include-tracked Include git tracked paths (default: skip tracked)
  --include-build-outputs
  Include build output paths (target, frontend/*/dist)
  --fail-on-found Exit non-zero when cleanup candidates are found (useful for CI dry-run)
  -h, --help Show help
Examples:
  ./scripts/ci/clean-artifacts.sh
  ./scripts/ci/clean-artifacts.sh --apply
  ./scripts/ci/clean-artifacts.sh --apply --mode archive --archive-tag manual_cleanup
  ./scripts/ci/clean-artifacts.sh --include-build-outputs --apply --mode archive --archive-tag weekly_cleanup
  ./scripts/ci/clean-artifacts.sh --fail-on-found
EOF
}
# Print a tagged status line for this script.
log() {
  echo "[clean-artifacts] $*"
}

# Return success (0) when the given repo-relative path is tracked by git.
# Globals: ROOT_DIR (read)
is_git_tracked() {
  local rel_path="$1"
  local listed
  listed="$(git -C "${ROOT_DIR}" ls-files -- "${rel_path}" 2>/dev/null || true)"
  [[ -n "${listed}" ]]
}

# Execute the given command when --apply was passed; otherwise only log it.
# Globals: APPLY (read)
# Returns: the command's exit status in apply mode, 0 in dry-run mode.
run_cmd() {
  if [[ "${APPLY}" != "true" ]]; then
    log "DRY-RUN: $*"
    return
  fi
  "$@"
}
# ---- argument parsing ----
while [[ $# -gt 0 ]]; do
case "$1" in
--apply)
APPLY="true"
shift
;;
--mode)
MODE="${2:-}"
shift 2
;;
--archive-base)
ARCHIVE_BASE="${2:-}"
shift 2
;;
--archive-tag)
ARCHIVE_TAG="${2:-}"
shift 2
;;
--include-tracked)
INCLUDE_TRACKED="true"
shift
;;
--include-build-outputs)
INCLUDE_BUILD_OUTPUTS="true"
shift
;;
--fail-on-found)
FAIL_ON_FOUND="true"
shift
;;
-h|--help)
usage
exit 0
;;
*)
echo "Unknown option: $1" >&2
usage
exit 1
;;
esac
done
# Validate mode.
if [[ "${MODE}" != "archive" && "${MODE}" != "delete" ]]; then
echo "Invalid --mode: ${MODE}" >&2
exit 1
fi
ARCHIVE_DIR="${ARCHIVE_BASE}/${ARCHIVE_TAG}"
log "root=${ROOT_DIR}"
log "mode=${MODE} apply=${APPLY} include_tracked=${INCLUDE_TRACKED} include_build_outputs=${INCLUDE_BUILD_OUTPUTS} fail_on_found=${FAIL_ON_FOUND}"
# Warn when dev/e2e processes are active: cleaned build outputs may reappear.
if [[ "${INCLUDE_BUILD_OUTPUTS}" == "true" ]] && pgrep -af "spring-boot:run|e2e_continuous_runner.sh|vite" >/dev/null 2>&1; then
log "WARN: detected active dev/e2e processes; build outputs may be recreated immediately."
fi
if [[ "${MODE}" == "archive" ]]; then
log "archive_dir=${ARCHIVE_DIR}"
# Create the archive batch dir only when actually applying.
if [[ "${APPLY}" == "true" ]]; then
mkdir -p "${ARCHIVE_DIR}"
fi
fi
cleaned_count=0
skipped_count=0
found_count=0
# Build the candidate list; build outputs are opt-in via --include-build-outputs.
CANDIDATE_PATHS=("${ARTIFACT_PATHS[@]}")
if [[ "${INCLUDE_BUILD_OUTPUTS}" == "true" ]]; then
CANDIDATE_PATHS+=("${BUILD_OUTPUT_PATHS[@]}")
fi
for rel in "${CANDIDATE_PATHS[@]}"; do
abs="${ROOT_DIR}/${rel}"
if [[ ! -e "${abs}" ]]; then
continue
fi
# Skip git-tracked paths unless --include-tracked was given.
if [[ "${INCLUDE_TRACKED}" != "true" ]] && is_git_tracked "${rel}"; then
log "SKIP tracked path: ${rel}"
skipped_count=$((skipped_count + 1))
continue
fi
if [[ "${MODE}" == "archive" ]]; then
dest="${ARCHIVE_DIR}/${rel}"
run_cmd mkdir -p "$(dirname "${dest}")"
run_cmd mv "${abs}" "${dest}"
log "ARCHIVE ${rel} -> ${dest}"
else
run_cmd rm -rf "${abs}"
log "DELETE ${rel}"
fi
found_count=$((found_count + 1))
cleaned_count=$((cleaned_count + 1))
done
# Root report spillover (files in repository root)
# nullglob: unmatched patterns expand to nothing instead of themselves.
shopt -s nullglob
for pattern in "${ROOT_SPILLOVER_GLOBS[@]}"; do
# Intentionally unquoted so the glob pattern expands.
for file in "${ROOT_DIR}"/${pattern}; do
[[ -f "${file}" ]] || continue
rel="$(basename "${file}")"
if [[ "${INCLUDE_TRACKED}" != "true" ]] && is_git_tracked "${rel}"; then
log "SKIP tracked root file: ${rel}"
skipped_count=$((skipped_count + 1))
continue
fi
if [[ "${MODE}" == "archive" ]]; then
dest="${ARCHIVE_DIR}/root-spillover/${rel}"
run_cmd mkdir -p "$(dirname "${dest}")"
run_cmd mv "${file}" "${dest}"
log "ARCHIVE ${rel} -> ${dest}"
else
run_cmd rm -f "${file}"
log "DELETE ${rel}"
fi
found_count=$((found_count + 1))
cleaned_count=$((cleaned_count + 1))
done
done
shopt -u nullglob
# Move root attach_pid files into tmp/pids.
# (Stray JVM .attach_pid* files accumulate in the repo root.)
pids_dir="${ROOT_DIR}/tmp/pids"
if compgen -G "${ROOT_DIR}/.attach_pid*" > /dev/null; then
  run_cmd mkdir -p "${pids_dir}"
  while IFS= read -r pid_file; do
    rel_pid="$(basename "${pid_file}")"
    if [[ "${MODE}" == "archive" ]]; then
      # Use run_cmd for the dry-run/apply split, consistent with the other
      # cleanup paths (the previous hand-rolled if/else duplicated run_cmd
      # and produced the exact same DRY-RUN log line).
      run_cmd mv "${pid_file}" "${pids_dir}/${rel_pid}"
      log "MOVE ${rel_pid} -> tmp/pids/"
    else
      run_cmd rm -f "${pid_file}"
      log "DELETE ${rel_pid}"
    fi
    found_count=$((found_count + 1))
    cleaned_count=$((cleaned_count + 1))
  done < <(find "${ROOT_DIR}" -maxdepth 1 -type f -name ".attach_pid*" | sort)
fi
log "done: found=${found_count}, cleaned=${cleaned_count}, skipped=${skipped_count}"
if [[ "${APPLY}" != "true" ]]; then
  log "dry-run completed. Use --apply to execute."
fi
# Optional CI gate: non-zero exit when anything was (or would be) cleaned.
if [[ "${FAIL_ON_FOUND}" == "true" && "${found_count}" -gt 0 ]]; then
  log "fail-on-found enabled: detected ${found_count} cleanup candidates."
  exit 2
fi

70
scripts/ci/logs-health-check.sh Executable file
View File

@@ -0,0 +1,70 @@
#!/usr/bin/env bash
# Report logs/ directory size and how many files qualify for archiving;
# warn when configured thresholds are exceeded. Read-only check.
set -euo pipefail
ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)"
LOGS_DIR="${ROOT_DIR}/logs"
# Thresholds are env-overridable for CI tuning.
OLDER_THAN_DAYS="${OLDER_THAN_DAYS:-1}"
WARN_TOTAL_MB="${WARN_TOTAL_MB:-300}"
WARN_CANDIDATE_FILES="${WARN_CANDIDATE_FILES:-500}"
# Keep in sync with archive-logs.sh patterns.
PATTERN_PATHS=(
"logs/e2e-automation/run_*.log"
"logs/e2e-automation/report_*.md"
"logs/prd-review/review_*.md"
"logs/prd-review/claude_apply_*.md"
"logs/prd-review/execution_report_*.md"
"logs/prd-review/optimization_report_*.md"
)
# Print a tagged status line for this script.
log() {
  echo "[logs-health] $*"
}

# Convert a byte count to megabytes with two decimal places (printed to stdout).
to_mb() {
  local byte_count="$1"
  awk -v b="${byte_count}" 'BEGIN { printf "%.2f", b / (1024 * 1024) }'
}
# Nothing to check when the logs directory is absent.
if [[ ! -d "${LOGS_DIR}" ]]; then
log "logs directory not found, skip."
exit 0
fi
# Files modified before this epoch count as archive candidates (GNU date).
cutoff_epoch="$(date -d "${OLDER_THAN_DAYS} days ago" +%s)"
# Total size in bytes (GNU du -b), converted to MB for the threshold check.
total_bytes="$(du -sb "${LOGS_DIR}" | awk '{print $1}')"
total_mb="$(to_mb "${total_bytes}")"
candidate_files=0
# nullglob: unmatched patterns expand to nothing instead of themselves.
shopt -s nullglob
for pattern in "${PATTERN_PATHS[@]}"; do
# Intentionally unquoted so the glob pattern expands.
for abs in "${ROOT_DIR}"/${pattern}; do
[[ -f "${abs}" ]] || continue
mtime_epoch="$(stat -c %Y "${abs}")"
if [[ "${mtime_epoch}" -lt "${cutoff_epoch}" ]]; then
candidate_files=$((candidate_files + 1))
fi
done
done
shopt -u nullglob
log "total_size_mb=${total_mb} (threshold=${WARN_TOTAL_MB})"
log "archive_candidates_older_than_${OLDER_THAN_DAYS}d=${candidate_files} (threshold=${WARN_CANDIDATE_FILES})"
# awk compares the floats; the negated exit encodes "a > b" as shell success.
if awk -v a="${total_mb}" -v b="${WARN_TOTAL_MB}" 'BEGIN { exit !(a > b) }'; then
log "WARN: logs directory is large. Consider: npm run logs:archive:apply"
fi
if [[ "${candidate_files}" -gt "${WARN_CANDIDATE_FILES}" ]]; then
log "WARN: many archive candidates detected. Consider archiving historical logs."
fi
log "top 5 largest log files:"
# Emit "<size> <path>" per file, largest first, keep 5, pretty-print as MB.
find "${LOGS_DIR}" -type f -printf '%s %p\n' | sort -nr | sed -n '1,5p' | awk '
{
mb = $1 / 1024 / 1024
$1 = ""
sub(/^ /, "", $0)
printf " - %.2f MB %s\n", mb, $0
}
'
exit 0

239
scripts/ci/prd-gap-check.sh Executable file
View File

@@ -0,0 +1,239 @@
#!/usr/bin/env bash
#
# PRD-vs-implementation gap check.
# Generates a readable gap report containing failed items and evidence paths.
#
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
ROOT_DIR="$(cd "${SCRIPT_DIR}/../.." && pwd)"
TMP_DIR="${ROOT_DIR}/tmp/prd-gap-report"
REPORT_FILE="${TMP_DIR}/prd-gap-report-$(date +%Y%m%d_%H%M%S).md"
JAVA_TMP_DIR="${TMP_DIR}/java"
JNA_TMP_DIR="${TMP_DIR}/jna"
# Rootless podman API socket for the current user.
PODMAN_SOCK_PATH="/run/user/$(id -u)/podman/podman.sock"
PODMAN_SOCK="unix://${PODMAN_SOCK_PATH}"
PODMAN_LOG="${TMP_DIR}/podman-service.log"
PODMAN_PID=""
# ANSI colors for console output.
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m' # No Color
mkdir -p "${TMP_DIR}" "${JAVA_TMP_DIR}" "${JNA_TMP_DIR}"
# Stop the background podman API service on any exit path.
cleanup() {
if [[ -n "${PODMAN_PID}" ]] && kill -0 "${PODMAN_PID}" >/dev/null 2>&1; then
kill "${PODMAN_PID}" >/dev/null 2>&1 || true
wait "${PODMAN_PID}" >/dev/null 2>&1 || true
fi
}
trap cleanup EXIT
# Start a rootless podman API service when podman is installed.
# Globals: PODMAN_SOCK_PATH, PODMAN_SOCK, PODMAN_LOG (read); PODMAN_PID (written)
# Returns: 0 when the socket is ready, 1 when podman is missing or not ready.
init_podman() {
if ! command -v podman >/dev/null 2>&1; then
echo -e "${YELLOW}WARNING: podman 未安装,跳过容器测试${NC}" >&2
return 1
fi
mkdir -p "$(dirname "${PODMAN_SOCK_PATH}")"
podman system service --time=0 "${PODMAN_SOCK}" > "${PODMAN_LOG}" 2>&1 &
PODMAN_PID=$!
# Wait up to ~30s for the API socket to accept requests.
for _ in {1..30}; do
if [[ -S "${PODMAN_SOCK_PATH}" ]] && podman --url "${PODMAN_SOCK}" info >/dev/null 2>&1; then
echo -e "${GREEN}Podman service 就绪${NC}"
return 0
fi
sleep 1
done
echo -e "${RED}ERROR: podman service 未就绪${NC}" >&2
return 1
}
# Write the report header (title, metadata, summary-table heading) to REPORT_FILE.
# Globals: REPORT_FILE (read; file is truncated and rewritten)
# NOTE: the previous implementation wrote placeholder tokens and substituted
# them with `sed s/BRANCH/…/`, which breaks whenever the branch name contains
# a `/` (e.g. "feature/x" terminates the sed expression). Expanding the values
# directly in an unquoted heredoc avoids the escaping problem entirely.
write_report_header() {
  local branch commit
  branch="$(git rev-parse --abbrev-ref HEAD 2>/dev/null || echo 'unknown')"
  commit="$(git rev-parse HEAD 2>/dev/null || echo 'unknown')"
  cat > "${REPORT_FILE}" << EOF
# PRD-实现差距报告
> 自动生成时间: $(date '+%Y-%m-%d %H:%M:%S')
> 分支: ${branch}
> 提交: ${commit}
## 执行摘要
| 检查项 | 状态 | 证据路径 |
|--------|------|----------|
EOF
}
# Append one check result row (and optional collapsible details) to REPORT_FILE.
# Arguments: $1 name, $2 status (PASS/FAIL/other), $3 evidence path, $4 details text
# Globals:   REPORT_FILE (appended)
# FIX: the original had a stray `|` after `<< EOF` on the cat line, which is a
# shell parse error (the heredoc was piped into nothing), breaking the script.
add_check_result() {
  local name="$1"
  local status="$2"
  local evidence="$3"
  local details="$4"
  local status_icon
  if [[ "${status}" == "PASS" ]]; then
    status_icon="✅"
  elif [[ "${status}" == "FAIL" ]]; then
    status_icon="❌"
  else
    status_icon="⚠️"
  fi
  cat >> "${REPORT_FILE}" << EOF
| ${name} | ${status_icon} ${status} | ${evidence} |
EOF
  if [[ -n "${details}" ]]; then
    cat >> "${REPORT_FILE}" << EOF
<details>
<summary>详细信息</summary>
\`\`\`
${details}
\`\`\`
</details>
EOF
  fi
}
# Run one test class (optionally a single method) via Maven and record the
# full build output as evidence.
# Arguments: $1 display name, $2 surefire -Dtest selector ("Class" or "Class#method")
# Outputs:   ONLY the evidence file path on stdout.
# FIX: callers capture this function with `$(run_test …)`, but the original
# sent the mvn|tee stream and all progress echoes to stdout too, so the
# captured "path" was the entire build log. All human-facing output now goes
# to stderr; stdout is reserved for the evidence path.
run_test() {
  local test_name="$1"
  local test_class="$2"
  # Strip "#method" so the evidence file is named after the class only.
  local clean_class="${test_class%%#*}"
  local evidence_path="${TMP_DIR}/test-results/${clean_class}.txt"
  mkdir -p "$(dirname "${evidence_path}")"
  echo -e "\n${YELLOW}运行测试: ${test_name}${NC}" >&2
  local start_time
  start_time=$(date +%s)
  local exit_code=0
  if [[ -n "${PODMAN_SOCK}" ]] && [[ -S "${PODMAN_SOCK_PATH}" ]]; then
    export DOCKER_HOST="${PODMAN_SOCK}"
  fi
  export TESTCONTAINERS_RYUK_DISABLED="true"
  export JNA_TMPDIR="${JNA_TMP_DIR}"
  export JAVA_IO_TMPDIR="${JAVA_TMP_DIR}"
  # tee keeps a console copy on stderr while recording the evidence file.
  mvn -B test -Dtest="${test_class}" \
    -Djna.tmpdir="${JNA_TMP_DIR}" \
    -Djava.io.tmpdir="${JAVA_TMP_DIR}" \
    -Dmigration.test.strict=true \
    -Dsurefire.failIfNoSpecifiedTests=true \
    2>&1 | tee "${evidence_path}" >&2 || exit_code=$?
  local end_time
  end_time=$(date +%s)
  local duration=$((end_time - start_time))
  local result
  if [[ ${exit_code} -eq 0 ]]; then
    result="PASS"
  else
    result="FAIL"
  fi
  echo -e "${result}: ${test_name} (${duration}s)" >&2
  # The evidence path is the function's only stdout output.
  echo "${evidence_path}"
}
# Main flow: start podman, run the PRD-critical tests plus a build check,
# and write the summary section of the report.
# FIX: `((var++))` returns exit status 1 whenever the pre-increment value is 0,
# which aborts the script under `set -e` on the very first counter bump.
# Plain arithmetic assignment is used instead. `local v=$(cmd)` declarations
# are also split so command failures are not masked by `local`'s exit status.
main() {
  echo -e "${GREEN}====== PRD-实现差距检查 ======${NC}"
  echo "报告输出目录: ${TMP_DIR}"
  # Point Testcontainers at podman when the service came up.
  if init_podman; then
    export DOCKER_HOST="${PODMAN_SOCK}"
  fi
  write_report_header
  # PRD-critical test selectors ("Class" or "Class#method").
  declare -a TEST_CLASSES=(
    "AuditLogImmutabilityIntegrationTest"
    "PermissionCanonicalMigrationTest#shouldValidateCanonicalPermissionsAgainstBaseline"
    "PermissionCanonicalMigrationTest#shouldHaveZeroLegacyPermissionCodes"
  )
  local failed_count=0
  local passed_count=0
  for test_spec in "${TEST_CLASSES[@]}"; do
    IFS='#' read -r test_class test_method <<< "${test_spec}"
    local evidence_path
    if [[ -n "${test_method}" ]]; then
      evidence_path=$(run_test "${test_spec}" "${test_class}#${test_method}")
    else
      evidence_path=$(run_test "${test_spec}" "${test_class}")
    fi
    if grep -q "BUILD SUCCESS" "${evidence_path}" 2>/dev/null; then
      add_check_result "${test_spec}" "PASS" "${evidence_path}" ""
      passed_count=$((passed_count + 1))
    else
      local details
      details=$(tail -50 "${evidence_path}" 2>/dev/null || echo "无日志")
      add_check_result "${test_spec}" "FAIL" "${evidence_path}" "${details}"
      failed_count=$((failed_count + 1))
    fi
  done
  # Backend build check.
  echo -e "\n${YELLOW}运行后端构建检查${NC}"
  local build_exit_code=0
  local build_log="${TMP_DIR}/maven-build.txt"
  mvn -B clean compile -DskipTests 2>&1 | tee "${build_log}" || build_exit_code=$?
  if [[ ${build_exit_code} -eq 0 ]]; then
    add_check_result "Maven构建" "PASS" "${build_log}" ""
    passed_count=$((passed_count + 1))
  else
    local details
    details=$(tail -50 "${build_log}" 2>/dev/null || echo "无日志")
    add_check_result "Maven构建" "FAIL" "${build_log}" "${details}"
    failed_count=$((failed_count + 1))
  fi
  # Report summary.
  cat >> "${REPORT_FILE}" << EOF
## 总结
- 通过: ${passed_count}
- 失败: ${failed_count}
- 生成时间: $(date '+%Y-%m-%d %H:%M:%S')
EOF
  echo -e "\n${GREEN}====== 检查完成 ======${NC}"
  echo -e "通过: ${GREEN}${passed_count}${NC}"
  echo -e "失败: ${RED}${failed_count}${NC}"
  echo -e "报告: ${REPORT_FILE}"
  if [[ ${failed_count} -gt 0 ]]; then
    exit 1
  fi
  exit 0
}
main "$@"

View File

@@ -0,0 +1,93 @@
#!/usr/bin/env bash
# Regenerate logs/archive/README.md, an index of archived log batches.
set -euo pipefail
# Repository root (two levels up from scripts/ci/).
ROOT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)"
# Archive batches live under logs/archive/<tag>/; the index is a generated README.
ARCHIVE_ROOT="${ROOT_DIR}/logs/archive"
OUTPUT_FILE="${ARCHIVE_ROOT}/README.md"
GENERATED_AT="$(date '+%Y-%m-%d %H:%M:%S %Z')"
# Format a UNIX epoch (seconds) as a human-readable local timestamp.
fmt_epoch() {
  local ts="$1"
  date -d "@${ts}" '+%Y-%m-%d %H:%M:%S'
}

# List batch directory names directly under ARCHIVE_ROOT, newest tag first
# (reverse lexicographic order; batch tags are timestamp-like).
# Globals: ARCHIVE_ROOT (read)
list_batches() {
  sort -r < <(find "${ARCHIVE_ROOT}" -mindepth 1 -maxdepth 1 -type d -printf '%f\n')
}
# Ensure the archive root exists so an (empty) index can still be generated.
if [[ ! -d "${ARCHIVE_ROOT}" ]]; then
  mkdir -p "${ARCHIVE_ROOT}"
fi
# The entire grouped block below is redirected into OUTPUT_FILE.
{
  echo "# 日志归档索引"
  echo
  echo "本文件由 \`scripts/ci/update-log-archive-index.sh\` 自动生成。"
  echo
  echo "- 生成时间: ${GENERATED_AT}"
  echo "- 归档根目录: \`logs/archive/\`"
  echo
  mapfile -t batches < <(list_batches)
  if [[ "${#batches[@]}" -eq 0 ]]; then
    echo "当前没有可用归档批次。"
    # NOTE: exiting inside the redirected group ends the whole script here,
    # so the final confirmation message is skipped for the empty case.
    exit 0
  fi
  echo "## 批次总览"
  echo
  echo "| 批次 | 文件数 | 体积 | 最早文件时间 | 最晚文件时间 |"
  echo "|---|---:|---:|---|---|"
  for batch in "${batches[@]}"; do
    batch_dir="${ARCHIVE_ROOT}/${batch}"
    file_count="$(find "${batch_dir}" -type f | wc -l)"
    size="$(du -sh "${batch_dir}" | awk '{print $1}')"
    if [[ "${file_count}" -eq 0 ]]; then
      earliest="-"
      latest="-"
    else
      # Single find+awk pass computes min and max mtime epochs.
      read -r earliest_epoch latest_epoch < <(
        find "${batch_dir}" -type f -printf '%T@\n' | awk '
          NR == 1 { min = $1; max = $1 }
          { if ($1 < min) min = $1; if ($1 > max) max = $1 }
          END { printf "%d %d\n", min, max }
        '
      )
      earliest="$(fmt_epoch "${earliest_epoch}")"
      latest="$(fmt_epoch "${latest_epoch}")"
    fi
    echo "| \`${batch}\` | ${file_count} | ${size} | ${earliest} | ${latest} |"
  done
  echo
  echo "## 子系统明细"
  echo
  for batch in "${batches[@]}"; do
    batch_dir="${ARCHIVE_ROOT}/${batch}"
    logs_dir="${batch_dir}/logs"
    echo "### ${batch}"
    echo
    if [[ ! -d "${logs_dir}" ]]; then
      echo "_未发现 \`logs/\` 子目录_"
      echo
      continue
    fi
    echo "| 子系统目录 | 文件数 | 体积 |"
    echo "|---|---:|---:|"
    while IFS= read -r subdir; do
      # Quote the prefix so glob metacharacters in logs_dir are not treated
      # as a pattern during prefix removal (ShellCheck SC2295).
      sub_name="${subdir#"${logs_dir}"/}"
      sub_count="$(find "${subdir}" -type f | wc -l)"
      sub_size="$(du -sh "${subdir}" | awk '{print $1}')"
      echo "| \`${sub_name}\` | ${sub_count} | ${sub_size} |"
    done < <(find "${logs_dir}" -mindepth 1 -maxdepth 1 -type d | sort)
    echo
  done
} > "${OUTPUT_FILE}"
echo "[update-log-archive-index] generated ${OUTPUT_FILE}"